prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k)
---|---
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def <|fim_middle|>(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | write |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def <|fim_middle|>(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _get_buffer |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def <|fim_middle|>(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _get_target |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def <|fim_middle|>(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _save_buffer |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def <|fim_middle|>():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _get_exporter |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def <|fim_middle|>(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _export |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def <|fim_middle|>(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _save_buffer |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def <|fim_middle|>(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _export |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def <|fim_middle|>(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _get_buffer |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def <|fim_middle|>(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _get_target |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def <|fim_middle|>(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _save_buffer |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def <|fim_middle|>():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def _export(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _get_exporter |
<|file_name|>io.py<|end_file_name|><|fim▁begin|>from PyQt4 import QtGui, QtCore, QtSvg
from PyQt4.QtCore import QMimeData
from PyQt4.QtGui import QGraphicsScene, QGraphicsView, QWidget, QApplication
from Orange.data.io import FileFormat
class ImgFormat(FileFormat):
@staticmethod
def _get_buffer(size, filename):
raise NotImplementedError
@staticmethod
def _get_target(scene, painter, buffer):
raise NotImplementedError
@staticmethod
def _save_buffer(buffer, filename):
raise NotImplementedError
@staticmethod
def _get_exporter():
raise NotImplementedError
@staticmethod
def _export(self, exporter, filename):
raise NotImplementedError
@classmethod
def write_image(cls, filename, scene):
try:
scene = scene.scene()
scenerect = scene.sceneRect() #preserve scene bounding rectangle
viewrect = scene.views()[0].sceneRect()
scene.setSceneRect(viewrect)
backgroundbrush = scene.backgroundBrush() #preserve scene background brush
scene.setBackgroundBrush(QtCore.Qt.white)
exporter = cls._get_exporter()
cls._export(exporter(scene), filename)
scene.setBackgroundBrush(backgroundbrush) # reset scene background brush
scene.setSceneRect(scenerect) # reset scene bounding rectangle
except Exception:
if isinstance(scene, (QGraphicsScene, QGraphicsView)):
rect = scene.sceneRect()
elif isinstance(scene, QWidget):
rect = scene.rect()
rect = rect.adjusted(-15, -15, 15, 15)
buffer = cls._get_buffer(rect.size(), filename)
painter = QtGui.QPainter()
painter.begin(buffer)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
target = cls._get_target(scene, painter, buffer, rect)
try:
scene.render(painter, target, rect)
except TypeError:
scene.render(painter) # PyQt4 QWidget.render() takes different params
cls._save_buffer(buffer, filename)
painter.end()
@classmethod
def write(cls, filename, scene):
if type(scene) == dict:
scene = scene['scene']
cls.write_image(filename, scene)
class PngFormat(ImgFormat):
EXTENSIONS = ('.png',)
DESCRIPTION = 'Portable Network Graphics'
PRIORITY = 50
@staticmethod
def _get_buffer(size, filename):
return QtGui.QPixmap(int(size.width()), int(size.height()))
@staticmethod
def _get_target(scene, painter, buffer, source):
try:
brush = scene.backgroundBrush()
if brush.style() == QtCore.Qt.NoBrush:
brush = QtGui.QBrush(scene.palette().color(QtGui.QPalette.Base))
except AttributeError: # not a QGraphicsView/Scene
brush = QtGui.QBrush(QtCore.Qt.white)
painter.fillRect(buffer.rect(), brush)
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
buffer.save(filename, "png")
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.ImageExporter import ImageExporter
return ImageExporter
@staticmethod
def _export(exporter, filename):
buffer = exporter.export(toBytes=True)
buffer.save(filename, "png")
class ClipboardFormat(PngFormat):
EXTENSIONS = ()
DESCRIPTION = 'System Clipboard'
PRIORITY = 50
@staticmethod
def _save_buffer(buffer, _):
QApplication.clipboard().setPixmap(buffer)
@staticmethod
def _export(exporter, _):
buffer = exporter.export(toBytes=True)
mimedata = QMimeData()
mimedata.setData("image/png", buffer)
QApplication.clipboard().setMimeData(mimedata)
class SvgFormat(ImgFormat):
EXTENSIONS = ('.svg',)
DESCRIPTION = 'Scalable Vector Graphics'
PRIORITY = 100
@staticmethod
def _get_buffer(size, filename):
buffer = QtSvg.QSvgGenerator()
buffer.setFileName(filename)
buffer.setSize(QtCore.QSize(int(size.width()), int(size.height())))
return buffer
@staticmethod
def _get_target(scene, painter, buffer, source):
return QtCore.QRectF(0, 0, source.width(), source.height())
@staticmethod
def _save_buffer(buffer, filename):
pass
@staticmethod
def _get_exporter():
from pyqtgraph.exporters.SVGExporter import SVGExporter
return SVGExporter
@staticmethod
def <|fim_middle|>(exporter, filename):
exporter.export(filename)
<|fim▁end|> | _export |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:<|fim▁hole|> item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)<|fim▁end|> | |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
<|fim_middle|>
<|fim▁end|> | def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result) |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
<|fim_middle|>
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener) |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
<|fim_middle|>
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | return ['resolve','categories','list'] |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
<|fim_middle|>
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
<|fim_middle|>
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
<|fim_middle|>
<|fim▁end|> | item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result) |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
<|fim_middle|>
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | return result[0] |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
<|fim_middle|>
<|fim▁end|> | return select_cb(result) |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def <|fim_middle|>(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | __init__ |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def <|fim_middle|>(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | capabilities |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def <|fim_middle|>(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | categories |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def <|fim_middle|>(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def resolve(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | list |
<|file_name|>hejbejse.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2011 Ivo Brhel
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,os,urllib,urllib2,cookielib
import util,resolver
from provider import ContentProvider
class HejbejseContentProvider(ContentProvider):
def __init__(self,username=None,password=None,filter=None):
ContentProvider.__init__(self,'hejbejse.tv','http://www.kynychova-tv.cz/',username,password,filter)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
urllib2.install_opener(opener)
def capabilities(self):
return ['resolve','categories','list']
def categories(self):
page = util.parse_html('http://www.kynychova-tv.cz/index.php?id=5')
result = []
for title,uri in [(x.h3.text,x.h3.a['href']) for x in page.select('div.entry5') if x.h3]:
item = self.dir_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def list(self, url):
url = self._url(url)
page = util.parse_html(url)
result = []
for title,uri in [(x.img['title'],x['href']) for x in page.select('div.entry3')[0].findAll('a')]:
item = self.video_item()
item['title'] = title
item['url'] = uri
result.append(item)
return result
def <|fim_middle|>(self,item,captcha_cb=None,select_cb=None):
item = item.copy()
url = self._url(item['url'])
page = util.parse_html(url)
result = []
data=str(page.select('div.entry3 > center')[0])
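        # Extract <iframe src="..."> URLs from the player markup; resolver.findstreams presumably
        # returns stream descriptors (name, url, quality, surl) that the loop below repackages.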
resolved = resolver.findstreams(data,['<iframe(.+?)src=[\"\'](?P<url>.+?)[\'\"]'])
try:
for i in resolved:
item = self.video_item()
item['title'] = i['name']
item['url'] = i['url']
item['quality'] = i['quality']
item['surl'] = i['surl']
result.append(item)
except:
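            # Bare except: any error while unpacking the resolved streams is swallowed and only logged.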
print '===Unknown resolver==='
if len(result)==1:
return result[0]
elif len(result) > 1 and select_cb:
return select_cb(result)
<|fim▁end|> | resolve |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],<|fim▁hole|> "msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}<|fim▁end|> | |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
<|fim_middle|>
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
<|fim_middle|>
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
<|fim_middle|>
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
<|fim_middle|>
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14} |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
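# Expected 422 response body when the multipart "file" field is missing; reused by the failure tests below.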
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
<|fim_middle|>
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1} |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
<|fim_middle|>
<|fim▁end|> | path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"} |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def <|fim_middle|>():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | test_openapi_schema |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def <|fim_middle|>():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | test_post_form_no_body |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def <|fim_middle|>():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | test_post_body_json |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def <|fim_middle|>(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | test_post_file |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def <|fim_middle|>(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def test_post_upload_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | test_post_large_file |
<|file_name|>test_tutorial001.py<|end_file_name|><|fim▁begin|>from fastapi.testclient import TestClient
from docs_src.request_files.tutorial001 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create File",
"operationId": "create_file_files__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_file_files__post"
}
}
},
"required": True,
},
}
},
"/uploadfile/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Create Upload File",
"operationId": "create_upload_file_uploadfile__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": {
"$ref": "#/components/schemas/Body_create_upload_file_uploadfile__post"
}
}
},
"required": True,
},
}
},
},
"components": {
"schemas": {
"Body_create_upload_file_uploadfile__post": {
"title": "Body_create_upload_file_uploadfile__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"Body_create_file_files__post": {
"title": "Body_create_file_files__post",
"required": ["file"],
"type": "object",
"properties": {
"file": {"title": "File", "type": "string", "format": "binary"}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
file_required = {
"detail": [
{
"loc": ["body", "file"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
def test_post_form_no_body():
response = client.post("/files/")
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_body_json():
response = client.post("/files/", json={"file": "Foo"})
assert response.status_code == 422, response.text
assert response.json() == file_required
def test_post_file(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": 14}
def test_post_large_file(tmp_path):
default_pydantic_max_size = 2 ** 16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/files/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"file_size": default_pydantic_max_size + 1}
def <|fim_middle|>(tmp_path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
client = TestClient(app)
with path.open("rb") as file:
response = client.post("/uploadfile/", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"filename": "test.txt"}
<|fim▁end|> | test_post_upload_file |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib<|fim▁hole|>IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())<|fim▁end|> | import json
import os
|
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
<|fim_middle|>
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | """Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest() |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
<|fim_middle|>
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | return os.path.join(*(full_path.split(os.path.sep)[1:])) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
<|fim_middle|>
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
<|fim_middle|>
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
<|fim_middle|>
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
<|fim_middle|>
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
<|fim_middle|>
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items()) # Sort strokes by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
) |
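# --- Illustrative sketch (not part of the original script) ---
# The completed branch above pulls a track id out of an IRMAS training filename by
# splitting on ']' and dropping the '.wav' suffix. The filename below is a
# hypothetical example of that naming convention, not a file from the dataset.
name = '[vio][nod][cla]0058__1.wav'
irmas_id = name.split(']')[3].split('.')[0]   # three bracketed tags -> index 3
assert irmas_id == '0058__1'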
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
<|fim_middle|>
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
<|fim_middle|>
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
<|fim_middle|>
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
<|fim_middle|>
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1 |
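# --- Illustrative sketch (not part of the original script) ---
# The completed test walk above pairs every .wav file with a .txt annotation that
# shares its stem by splitting on '.wa'. The path below is a hypothetical example.
file_name = 'IRMAS-TestingData-Part1/Part1/example_track.wav'
track_name = str(file_name.split('.wa')[0]) + '.txt'
assert track_name == 'IRMAS-TestingData-Part1/Part1/example_track.txt'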
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
<|fim_middle|>
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1 |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
<|fim_middle|>
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1 |
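# --- Illustrative sketch (not part of the original script) ---
# The triple-nested os.walk calls above revisit each Train/Test subfolder explicitly;
# a single recursive walk collects the same .wav paths, as this standalone helper
# shows. The root argument is whatever directory the caller wants to scan.
import os

def list_wavs(root):
    wavs = []
    for dirpath, _dirnames, filenames in os.walk(root):
        wavs.extend(os.path.join(dirpath, name)
                    for name in filenames if name.endswith('.wav'))
    return wavs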
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
<|fim_middle|>
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1 |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args()) |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def <|fim_middle|>(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
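# --- Illustrative check (not part of the original script) ---
# md5() above streams the file in 4096-byte chunks; hashing the same bytes in one go
# yields an identical digest, as this small self-contained check demonstrates.
import hashlib
_payload = b'example audio bytes' * 1000
_chunked = hashlib.md5()
for _i in range(0, len(_payload), 4096):
    _chunked.update(_payload[_i:_i + 4096])
assert _chunked.hexdigest() == hashlib.md5(_payload).hexdigest()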
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | md5 |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def <|fim_middle|>(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | strip_first_dir |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def <|fim_middle|>(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | make_irmas_index |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def <|fim_middle|>(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def main(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | make_irmas_test_index |
<|file_name|>make_irmas_index.py<|end_file_name|><|fim▁begin|>import argparse
import glob
import hashlib
import json
import os
IRMAS_INDEX_PATH = '../mirdata/indexes/irmas_index.json'
# Assumed fix: the original never defined the test index path used by
# make_irmas_test_index(), which would raise a NameError if that function were called.
IRMAS_TEST_INDEX_PATH = '../mirdata/indexes/irmas_test_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def strip_first_dir(full_path):
return os.path.join(*(full_path.split(os.path.sep)[1:]))
def make_irmas_index(irmas_data_path):
count = 0
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Train' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
if 'dru' in file:
irmas_id_dru = file.split(']')[3] # Obtain id
irmas_id_dru_no_wav = irmas_id_dru.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_dru_no_wav] = os.path.join(
directory, directory_, file
)
if 'nod' in file:
irmas_id_nod = file.split(']')[3] # Obtain id
irmas_id_nod_no_wav = irmas_id_nod.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_nod_no_wav] = os.path.join(
directory, directory_, file
)
else:
irmas_id = file.split(']')[2] # Obtain id
irmas_id_no_wav = irmas_id.split('.')[
0
] # Obtain id without '.wav'
irmas_dict[irmas_id_no_wav] = os.path.join(
directory, directory_, file
)
irmas_test_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_test_dict[count] = [file_name, track_name]
count += 1
irmas_id_list = sorted(irmas_dict.items())  # Sort tracks by id
irmas_index = {}
for inst in irmas_id_list:
print(inst[1])
audio_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[inst[0]] = {
'audio': (inst[1], audio_checksum),
'annotation': (inst[1], audio_checksum),
}
index = 1
for inst in irmas_test_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
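# --- Illustrative sketch (not part of the original script) ---
# make_irmas_index() above writes a JSON object keyed by track id, where each entry
# pairs a relative path with its md5 checksum (for training tracks the annotation
# mirrors the audio entry). The id, paths and checksums below are hypothetical
# placeholders showing the shape of one entry.
_example_entry = {
    '0058__1': {
        'audio': ['IRMAS-TrainingData/vio/[vio][nod][cla]0058__1.wav', '<md5-checksum>'],
        'annotation': ['IRMAS-TrainingData/vio/[vio][nod][cla]0058__1.wav', '<md5-checksum>'],
    }
}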
def make_irmas_test_index(irmas_data_path):
count = 1
irmas_dict = dict()
for root, dirs, files in os.walk(irmas_data_path):
for directory in dirs:
if 'Test' in directory:
for root_, dirs_, files_ in os.walk(
os.path.join(irmas_data_path, directory)
):
for directory_ in dirs_:
for root__, dirs__, files__ in os.walk(
os.path.join(irmas_data_path, directory, directory_)
):
for file in files__:
if file.endswith('.wav'):
file_name = os.path.join(
directory, directory_, file
)
track_name = str(file_name.split('.wa')[0]) + '.txt'
irmas_dict[count] = [file_name, track_name]
count += 1
irmas_index = {}
index = 1
for inst in irmas_dict.values():
audio_checksum = md5(os.path.join(irmas_data_path, inst[0]))
annotation_checksum = md5(os.path.join(irmas_data_path, inst[1]))
irmas_index[index] = {
'audio': (inst[0], audio_checksum),
'annotation': (inst[1], annotation_checksum),
}
index += 1
with open(IRMAS_TEST_INDEX_PATH, 'w') as fhandle:
json.dump(irmas_index, fhandle, indent=2)
def <|fim_middle|>(args):
make_irmas_index(args.irmas_data_path)
# make_irmas_test_index(args.irmas_data_path)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make IRMAS index file.')
PARSER.add_argument('irmas_data_path', type=str, help='Path to IRMAS data folder.')
main(PARSER.parse_args())
<|fim▁end|> | main |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],<|fim▁hole|> bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)<|fim▁end|> | |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
<|fim_middle|>
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
<|fim_middle|>
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f) |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
<|fim_middle|>
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type) |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
<|fim_middle|>
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter])) |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
<|fim_middle|>
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1) |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
<|fim_middle|>
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i) |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
<|fim_middle|>
<|fim▁end|> | if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type) |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
<|fim_middle|>
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | return "Could not find book name: " + book_name |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
<|fim_middle|>
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | return 'Reference not in form "Book Chapter:Passage"' |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
<|fim_middle|>
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | return "Chapter must be an int" |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
<|fim_middle|>
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | return "Passage must be an int" |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
<|fim_middle|>
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | return "Verse or chapter out of range" |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
<|fim_middle|>
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type) |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
<|fim_middle|>
<|fim▁end|> | bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type) |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def <|fim_middle|>(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | make_book_to_index |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def <|fim_middle|>():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | plugin_setup |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def <|fim_middle|>(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | help_command |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def <|fim_middle|>(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | is_valid_quote |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def <|fim_middle|>(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | get_quote |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def <|fim_middle|>(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | get_quote_from_ref |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
@plugin.listen(command='bible [book] [passage]')
def <|fim_middle|>(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
<|fim▁end|> | passage_command |
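The four plugin rows above all hole out parts of the same main.py, whose help text promises references of the form `/bible Genesis 1:3`: the book name is looked up through book_to_index and the 1-based chapter and verse are shifted to 0-based list indices. A small editorial sketch of that indexing, using placeholder verse text in place of the en_kjv.json contents:

```python
# Editorial sketch, not part of the dataset: how "Genesis 1:3" maps onto the
# 0-based indices used by get_quote/get_quote_from_ref in the rows above.
bible = [{
    "name": "Genesis",
    "chapters": [["(text of Genesis 1:1)", "(text of Genesis 1:2)", "(text of Genesis 1:3)"]],
}]
book_to_index = {book["name"].lower(): i for i, book in enumerate(bible)}

book_i = book_to_index["genesis"]        # 0
chapter_i, verse_i = 1 - 1, 3 - 1        # "1:3" -> chapter index 0, verse index 2
print("{}\n - {} {}:{}".format(
    bible[book_i]["chapters"][chapter_i][verse_i],
    bible[book_i]["name"], chapter_i + 1, verse_i + 1))
```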
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
else:
from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
<|fim▁hole|>EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
return RSA.generate(bits) # NOT OK
make_new_rsa_key_weak(RSA_WEAK)
def make_new_rsa_key_strong(bits):
return RSA.generate(bits) # OK
make_new_rsa_key_strong(RSA_STRONG)
def only_used_by_test(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
return RSA.generate(bits)<|fim▁end|> | BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163 |
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
else:
from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
<|fim_middle|>
make_new_rsa_key_weak(RSA_WEAK)
def make_new_rsa_key_strong(bits):
return RSA.generate(bits) # OK
make_new_rsa_key_strong(RSA_STRONG)
def only_used_by_test(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
return RSA.generate(bits)
<|fim▁end|> | return RSA.generate(bits) # NOT OK |
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
else:
from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
return RSA.generate(bits) # NOT OK
make_new_rsa_key_weak(RSA_WEAK)
def make_new_rsa_key_strong(bits):
<|fim_middle|>
make_new_rsa_key_strong(RSA_STRONG)
def only_used_by_test(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
return RSA.generate(bits)
<|fim▁end|> | return RSA.generate(bits) # OK |
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
else:
from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
return RSA.generate(bits) # NOT OK
make_new_rsa_key_weak(RSA_WEAK)
def make_new_rsa_key_strong(bits):
return RSA.generate(bits) # OK
make_new_rsa_key_strong(RSA_STRONG)
def only_used_by_test(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
<|fim_middle|>
<|fim▁end|> | return RSA.generate(bits) |
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
<|fim_middle|>
else:
from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
return RSA.generate(bits) # NOT OK
make_new_rsa_key_weak(RSA_WEAK)
def make_new_rsa_key_strong(bits):
return RSA.generate(bits) # OK
make_new_rsa_key_strong(RSA_STRONG)
def only_used_by_test(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
return RSA.generate(bits)
<|fim▁end|> | from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA |
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
else:
<|fim_middle|>
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
return RSA.generate(bits) # NOT OK
make_new_rsa_key_weak(RSA_WEAK)
def make_new_rsa_key_strong(bits):
return RSA.generate(bits) # OK
make_new_rsa_key_strong(RSA_STRONG)
def only_used_by_test(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
return RSA.generate(bits)
<|fim▁end|> | from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA |
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
else:
from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def <|fim_middle|>(bits):
return RSA.generate(bits) # NOT OK
make_new_rsa_key_weak(RSA_WEAK)
def make_new_rsa_key_strong(bits):
return RSA.generate(bits) # OK
make_new_rsa_key_strong(RSA_STRONG)
def only_used_by_test(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
return RSA.generate(bits)
<|fim▁end|> | make_new_rsa_key_weak |
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
else:
from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
return RSA.generate(bits) # NOT OK
make_new_rsa_key_weak(RSA_WEAK)
def <|fim_middle|>(bits):
return RSA.generate(bits) # OK
make_new_rsa_key_strong(RSA_STRONG)
def only_used_by_test(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
return RSA.generate(bits)
<|fim▁end|> | make_new_rsa_key_strong |
<|file_name|>weak_crypto.py<|end_file_name|><|fim▁begin|>from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import ec, dsa, rsa
# Crypto and Cryptodome have same API
if random():
from Crypto.PublicKey import DSA
from Crypto.PublicKey import RSA
else:
from Cryptodome.PublicKey import DSA
from Cryptodome.PublicKey import RSA
RSA_WEAK = 1024
RSA_OK = 2048
RSA_STRONG = 3076
DSA_WEAK = 1024
DSA_OK = 2048
DSA_STRONG = 3076
BIG = 10000
EC_WEAK = ec.SECT163K1() # has key size of 163
EC_OK = ec.SECP224R1()
EC_STRONG = ec.SECP384R1()
EC_BIG = ec.SECT571R1()
dsa_gen_key = dsa.generate_private_key
ec_gen_key = ec.generate_private_key
rsa_gen_key = rsa.generate_private_key
# Strong and OK keys.
dsa_gen_key(key_size=DSA_OK)
dsa_gen_key(key_size=DSA_STRONG)
dsa_gen_key(key_size=BIG)
ec_gen_key(curve=EC_OK)
ec_gen_key(curve=EC_STRONG)
ec_gen_key(curve=EC_BIG)
rsa_gen_key(public_exponent=65537, key_size=RSA_OK)
rsa_gen_key(public_exponent=65537, key_size=RSA_STRONG)
rsa_gen_key(public_exponent=65537, key_size=BIG)
DSA.generate(bits=RSA_OK)
DSA.generate(bits=RSA_STRONG)
RSA.generate(bits=RSA_OK)
RSA.generate(bits=RSA_STRONG)
dsa_gen_key(DSA_OK)
dsa_gen_key(DSA_STRONG)
dsa_gen_key(BIG)
ec_gen_key(EC_OK)
ec_gen_key(EC_STRONG)
ec_gen_key(EC_BIG)
rsa_gen_key(65537, RSA_OK)
rsa_gen_key(65537, RSA_STRONG)
rsa_gen_key(65537, BIG)
DSA.generate(DSA_OK)
DSA.generate(DSA_STRONG)
RSA.generate(RSA_OK)
RSA.generate(RSA_STRONG)
# Weak keys
dsa_gen_key(DSA_WEAK)
ec_gen_key(EC_WEAK)
rsa_gen_key(65537, RSA_WEAK)
dsa_gen_key(key_size=DSA_WEAK)
ec_gen_key(curve=EC_WEAK)
rsa_gen_key(65537, key_size=RSA_WEAK)
DSA.generate(DSA_WEAK)
RSA.generate(RSA_WEAK)
# ------------------------------------------------------------------------------
# Through function calls
def make_new_rsa_key_weak(bits):
return RSA.generate(bits) # NOT OK
make_new_rsa_key_weak(RSA_WEAK)
def make_new_rsa_key_strong(bits):
return RSA.generate(bits) # OK
make_new_rsa_key_strong(RSA_STRONG)
def <|fim_middle|>(bits):
# Although this call will technically not be ok, since it's only used in a test, we don't want to alert on it.
return RSA.generate(bits)
<|fim▁end|> | only_used_by_test |
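The weak_crypto.py rows above serve as a fixture that labels 1024-bit RSA/DSA keys and the 163-bit SECT163K1 curve as weak, while 2048-bit-and-larger keys and SECP224R1-and-larger curves pass. A brief editorial sketch of the size checks those labels imply; the threshold constants and helper names are assumptions for illustration only:

```python
from cryptography.hazmat.primitives.asymmetric import ec

MIN_RSA_DSA_BITS = 2048   # RSA_OK / DSA_OK in the fixture above
MIN_EC_BITS = 224         # SECP224R1 is labelled OK, SECT163K1 (163 bits) weak

def is_weak_rsa_or_dsa(key_size: int) -> bool:
    return key_size < MIN_RSA_DSA_BITS

def is_weak_curve(curve: ec.EllipticCurve) -> bool:
    # Curve instances expose their size through the .key_size attribute.
    return curve.key_size < MIN_EC_BITS

assert is_weak_rsa_or_dsa(1024) and not is_weak_rsa_or_dsa(2048)
assert is_weak_curve(ec.SECT163K1()) and not is_weak_curve(ec.SECP224R1())
```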
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
""" Incorrect HTTP API arguments """
pass
class RenderError(Exception):
""" Error rendering page """
pass
class InternalError(Exception):
""" Unhandled internal error """
pass
class GlobalTimeoutError(Exception):
""" Timeout exceeded rendering page """
pass
<|fim▁hole|> pass
class ExpiredArguments(Exception):
""" Arguments stored with ``save_args`` are expired """
pass
class ScriptError(BadOption):
""" Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass<|fim▁end|> |
class UnsupportedContentType(Exception):
""" Request Content-Type is not supported """ |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
<|fim_middle|>
class RenderError(Exception):
""" Error rendering page """
pass
class InternalError(Exception):
""" Unhandled internal error """
pass
class GlobalTimeoutError(Exception):
""" Timeout exceeded rendering page """
pass
class UnsupportedContentType(Exception):
""" Request Content-Type is not supported """
pass
class ExpiredArguments(Exception):
""" Arguments stored with ``save_args`` are expired """
pass
class ScriptError(BadOption):
""" Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass
<|fim▁end|> | """ Incorrect HTTP API arguments """
pass |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
""" Incorrect HTTP API arguments """
pass
class RenderError(Exception):
<|fim_middle|>
class InternalError(Exception):
""" Unhandled internal error """
pass
class GlobalTimeoutError(Exception):
""" Timeout exceeded rendering page """
pass
class UnsupportedContentType(Exception):
""" Request Content-Type is not supported """
pass
class ExpiredArguments(Exception):
""" Arguments stored with ``save_args`` are expired """
pass
class ScriptError(BadOption):
""" Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass
<|fim▁end|> | """ Error rendering page """
pass |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
""" Incorrect HTTP API arguments """
pass
class RenderError(Exception):
""" Error rendering page """
pass
class InternalError(Exception):
<|fim_middle|>
class GlobalTimeoutError(Exception):
""" Timeout exceeded rendering page """
pass
class UnsupportedContentType(Exception):
""" Request Content-Type is not supported """
pass
class ExpiredArguments(Exception):
""" Arguments stored with ``save_args`` are expired """
pass
class ScriptError(BadOption):
""" Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass
<|fim▁end|> | """ Unhandled internal error """
pass |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
""" Incorrect HTTP API arguments """
pass
class RenderError(Exception):
""" Error rendering page """
pass
class InternalError(Exception):
""" Unhandled internal error """
pass
class GlobalTimeoutError(Exception):
<|fim_middle|>
class UnsupportedContentType(Exception):
""" Request Content-Type is not supported """
pass
class ExpiredArguments(Exception):
""" Arguments stored with ``save_args`` are expired """
pass
class ScriptError(BadOption):
""" Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass
<|fim▁end|> | """ Timeout exceeded rendering page """
pass |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
""" Incorrect HTTP API arguments """
pass
class RenderError(Exception):
""" Error rendering page """
pass
class InternalError(Exception):
""" Unhandled internal error """
pass
class GlobalTimeoutError(Exception):
""" Timeout exceeded rendering page """
pass
class UnsupportedContentType(Exception):
<|fim_middle|>
class ExpiredArguments(Exception):
""" Arguments stored with ``save_args`` are expired """
pass
class ScriptError(BadOption):
""" Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass
<|fim▁end|> | """ Request Content-Type is not supported """
pass |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
""" Incorrect HTTP API arguments """
pass
class RenderError(Exception):
""" Error rendering page """
pass
class InternalError(Exception):
""" Unhandled internal error """
pass
class GlobalTimeoutError(Exception):
""" Timeout exceeded rendering page """
pass
class UnsupportedContentType(Exception):
""" Request Content-Type is not supported """
pass
class ExpiredArguments(Exception):
<|fim_middle|>
class ScriptError(BadOption):
""" Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR'
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass
<|fim▁end|> | """ Arguments stored with ``save_args`` are expired """
pass |
<|file_name|>exceptions.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import
class BadOption(Exception):
""" Incorrect HTTP API arguments """
pass
class RenderError(Exception):
""" Error rendering page """
pass
class InternalError(Exception):
""" Unhandled internal error """
pass
class GlobalTimeoutError(Exception):
""" Timeout exceeded rendering page """
pass
class UnsupportedContentType(Exception):
""" Request Content-Type is not supported """
pass
class ExpiredArguments(Exception):
""" Arguments stored with ``save_args`` are expired """
pass
class ScriptError(BadOption):
<|fim_middle|>
class JsError(Exception):
""" Error occured in JavaScript code """
pass
class OneShotCallbackError(Exception):
""" A one shot callback was called more than once. """
pass
<|fim▁end|> | """ Error happened while executing Lua script """
LUA_INIT_ERROR = 'LUA_INIT_ERROR' # error happened before coroutine starts
LUA_ERROR = 'LUA_ERROR' # lua error() is called from the coroutine
LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR' # result can't be converted to Python
SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR' # custom error raised by Splash
BAD_MAIN_ERROR = 'BAD_MAIN_ERROR' # main() definition is incorrect
MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR' # main() is not found
SYNTAX_ERROR = 'SYNTAX_ERROR' # XXX: unused; reported as INIT_ERROR now
JS_ERROR = 'JS_ERROR' # error in a wrapped JS function
UNKNOWN_ERROR = 'UNKNOWN_ERROR' |
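In the exceptions.py rows above, ScriptError is a subclass of BadOption, so the Lua error-type constants travel on an exception that generic bad-argument handlers already catch. A short editorial sketch of what that means for callers; the classes are re-declared here only to keep the snippet self-contained, and the dispatch logic is an assumption about usage rather than code from the rows:

```python
class BadOption(Exception):
    """ Incorrect HTTP API arguments """

class ScriptError(BadOption):
    LUA_ERROR = 'LUA_ERROR'
    UNKNOWN_ERROR = 'UNKNOWN_ERROR'

def classify(exc: Exception) -> str:
    # ScriptError subclasses BadOption, so check the more specific type first;
    # a handler written only for BadOption would also swallow script failures.
    if isinstance(exc, ScriptError):
        return "script error"
    if isinstance(exc, BadOption):
        return "bad argument"
    return "internal"

print(classify(ScriptError("lua error() was called")))    # script error
print(classify(BadOption("unsupported render option")))   # bad argument
```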