docstring | function | __index_level_0__
---|---|---|
Create our state object.
Parameters:
---------------------------------------------------------------------
hsObj: Reference to the HypersearchV2 instance, which provides the
ClientJobsDAO, logger, and jobID used by this state object. | def __init__(self, hsObj):
# Save constructor parameters
self._hsObj = hsObj
# Convenient access to the logger
self.logger = self._hsObj.logger
# This contains our current state, and local working changes
self._state = None
# This contains the state we last read from the database
self._priorStateJSON = None
# Set when we make a change to our state locally
self._dirty = False
# Read in the initial state
self.readStateFromDB() | 108,464 |
Set our state to that obtained from the engWorkerState field of the
job record.
Parameters:
---------------------------------------------------------------------
retval: nothing | def readStateFromDB(self):
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(self._hsObj._jobID,
['engWorkerState'])[0]
# Init if no prior state yet
if self._priorStateJSON is None:
swarms = dict()
# Fast Swarm, first and only sprint has one swarm for each field
# in fixedFields
if self._hsObj._fixedFields is not None:
print self._hsObj._fixedFields
encoderSet = []
for field in self._hsObj._fixedFields:
if field == '_classifierInput':
continue
encoderName = self.getEncoderKeyFromName(field)
assert encoderName in self._hsObj._encoderNames, "The field '%s' " \
"specified in the fixedFields list is not present in this " \
"model." % (field)
encoderSet.append(encoderName)
encoderSet.sort()
swarms['.'.join(encoderSet)] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Temporal prediction search, first sprint has N swarms of 1 field each,
# the predicted field may or may not be that one field.
elif self._hsObj._searchType == HsSearchType.temporal:
for encoderName in self._hsObj._encoderNames:
swarms[encoderName] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Classification prediction search, first sprint has N swarms of 1 field
# each where this field can NOT be the predicted field.
elif self._hsObj._searchType == HsSearchType.classification:
for encoderName in self._hsObj._encoderNames:
if encoderName == self._hsObj._predictedFieldEncoder:
continue
swarms[encoderName] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
# Legacy temporal. This is either a model that uses reconstruction or
# an older multi-step model that doesn't have a separate
# 'classifierOnly' encoder for the predicted field. Here, the predicted
# field must ALWAYS be present and the first sprint tries the predicted
# field only
elif self._hsObj._searchType == HsSearchType.legacyTemporal:
swarms[self._hsObj._predictedFieldEncoder] = {
'status': 'active',
'bestModelId': None,
'bestErrScore': None,
'sprintIdx': 0,
}
else:
raise RuntimeError("Unsupported search type: %s" % \
(self._hsObj._searchType))
# Initialize the state.
self._state = dict(
# The last time the state was updated by a worker.
lastUpdateTime = time.time(),
# Set from within setSwarmState() if we detect that the sprint we just
# completed did worse than a prior sprint. This stores the index of
# the last good sprint.
lastGoodSprint = None,
# Set from within setSwarmState() once lastGoodSprint has been set and
# all sprints have completed.
searchOver = False,
# This is a summary of the active swarms - this information can also
# be obtained from the swarms entry that follows, but is summarized here
# for easier reference when viewing the state as presented by
# log messages and prints of the hsState data structure (by
# permutations_runner).
activeSwarms = swarms.keys(),
# All the swarms that have been created so far.
swarms = swarms,
# All the sprints that have completed or are in progress.
sprints = [{'status': 'active',
'bestModelId': None,
'bestErrScore': None}],
# The list of encoders we have "blacklisted" because they
# performed so poorly.
blackListedEncoders = [],
)
# This will do nothing if the value of engWorkerState is not still None.
self._hsObj._cjDAO.jobSetFieldIfEqual(
self._hsObj._jobID, 'engWorkerState', json.dumps(self._state), None)
self._priorStateJSON = self._hsObj._cjDAO.jobGetFields(
self._hsObj._jobID, ['engWorkerState'])[0]
assert (self._priorStateJSON is not None)
# Read state from the database
self._state = json.loads(self._priorStateJSON)
self._dirty = False | 108,465 |
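The read path above pairs naturally with a write path built on the same jobSetFieldIfEqual compare-and-swap primitive. Below is a minimal sketch of how dirty local state might be flushed back, assuming jobSetFieldIfEqual returns a success flag; the method name writeStateToDB and the conflict handling shown are illustrative, not the verbatim implementation:

def writeStateToDB(self):
  # Nothing to do if no local changes have been made
  if not self._dirty:
    return
  self._state['lastUpdateTime'] = time.time()
  newStateJSON = json.dumps(self._state)
  # Succeeds only if no other worker updated the field since our last read
  success = self._hsObj._cjDAO.jobSetFieldIfEqual(
      self._hsObj._jobID, 'engWorkerState', newStateJSON,
      self._priorStateJSON)
  if success:
    self._priorStateJSON = newStateJSON
    self._dirty = False
  else:
    # Assumption: on conflict, re-read and let the caller re-apply changes
    self.readStateFromDB()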
Return the field contributions statistics.
Parameters:
---------------------------------------------------------------------
retval: (pctFieldContributions, absFieldContributions) tuple of
dictionaries keyed by field name, giving the percent and absolute
amount each field contributed to the best score. | def getFieldContributions(self):
# In the fast swarm, there is only 1 sprint and field contributions are
# not defined
if self._hsObj._fixedFields is not None:
return dict(), dict()
# Get the predicted field encoder name
predictedEncoderName = self._hsObj._predictedFieldEncoder
# -----------------------------------------------------------------------
# Collect all the single field scores
fieldScores = []
for swarmId, info in self._state['swarms'].iteritems():
encodersUsed = swarmId.split('.')
if len(encodersUsed) != 1:
continue
field = self.getEncoderNameFromKey(encodersUsed[0])
bestScore = info['bestErrScore']
# If the bestScore is None, this swarm hasn't completed yet (this could
# happen if we're exiting because of maxModels), so look up the best
# score so far
if bestScore is None:
(_modelId, bestScore) = \
self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
fieldScores.append((bestScore, field))
# -----------------------------------------------------------------------
# If we only have 1 field that was tried in the first sprint, then use that
# as the base and get the contributions from the fields in the next sprint.
if self._hsObj._searchType == HsSearchType.legacyTemporal:
assert(len(fieldScores)==1)
(baseErrScore, baseField) = fieldScores[0]
for swarmId, info in self._state['swarms'].iteritems():
encodersUsed = swarmId.split('.')
if len(encodersUsed) != 2:
continue
fields = [self.getEncoderNameFromKey(name) for name in encodersUsed]
fields.remove(baseField)
fieldScores.append((info['bestErrScore'], fields[0]))
# The first sprint tried a bunch of fields, pick the worst performing one
# (within the top self._hsObj._maxBranching ones) as the base
else:
fieldScores.sort(reverse=True)
# If maxBranching was specified, pick the worst performing field within
# the top maxBranching+1 fields as our base, which will give that field
# a contribution of 0.
if self._hsObj._maxBranching > 0 \
and len(fieldScores) > self._hsObj._maxBranching:
baseErrScore = fieldScores[-self._hsObj._maxBranching-1][0]
else:
baseErrScore = fieldScores[0][0]
# -----------------------------------------------------------------------
# Prepare and return the fieldContributions dict
pctFieldContributionsDict = dict()
absFieldContributionsDict = dict()
# If we have no base score, can't compute field contributions. This can
# happen when we exit early due to maxModels or being cancelled
if baseErrScore is not None:
# If the base error score is 0, we can't compute a percent difference
# off of it, so move it to a very small float
if abs(baseErrScore) < 0.00001:
baseErrScore = 0.00001
for (errScore, field) in fieldScores:
if errScore is not None:
pctBetter = (baseErrScore - errScore) * 100.0 / baseErrScore
else:
pctBetter = 0.0
errScore = baseErrScore # for absFieldContribution
pctFieldContributionsDict[field] = pctBetter
absFieldContributionsDict[field] = baseErrScore - errScore
self.logger.debug("FieldContributions: %s" % (pctFieldContributionsDict))
return pctFieldContributionsDict, absFieldContributionsDict | 108,467 |
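A worked example of the percent computation above, with hypothetical scores (the field names 'consumption' and 'timestamp' are made up; lower errScore is better):

baseErrScore = 10.0
fieldScores = [(8.0, 'consumption'), (10.0, 'timestamp')]  # hypothetical
for (errScore, field) in fieldScores:
  pctBetter = (baseErrScore - errScore) * 100.0 / baseErrScore
  print field, pctBetter
# consumption 20.0  (20% better than the base score)
# timestamp 0.0     (no improvement over the base score)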
Return the list of all swarms in the given sprint.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids in the given sprint | def getAllSwarms(self, sprintIdx):
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['sprintIdx'] == sprintIdx:
swarmIds.append(swarmId)
return swarmIds | 108,468 |
Return the list of all completed swarms.
Parameters:
---------------------------------------------------------------------
retval: list of completed swarm Ids | def getCompletedSwarms(self):
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completed':
swarmIds.append(swarmId)
return swarmIds | 108,469 |
Return the list of all completing swarms.
Parameters:
---------------------------------------------------------------------
retval: list of completing swarm Ids | def getCompletingSwarms(self):
swarmIds = []
for swarmId, info in self._state['swarms'].iteritems():
if info['status'] == 'completing':
swarmIds.append(swarmId)
return swarmIds | 108,470 |
Return the best model ID and its errScore from the given sprint,
which may still be in progress. This returns the best score from all models
in the sprint which have matured so far.
Parameters:
---------------------------------------------------------------------
retval: (modelId, errScore) | def bestModelInSprint(self, sprintIdx):
# Get all the swarms in this sprint
swarms = self.getAllSwarms(sprintIdx)
# Get the best model and score from each swarm
bestModelId = None
bestErrScore = numpy.inf
for swarmId in swarms:
(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
if errScore < bestErrScore:
bestModelId = modelId
bestErrScore = errScore
return (bestModelId, bestErrScore) | 108,471 |
Change the given swarm's status to 'newStatus'. If 'newStatus' is
'completed', the swarm's bestModelId and bestErrScore are looked up from
the results database and recorded.
Parameters:
---------------------------------------------------------------------
swarmId: swarm Id
newStatus: new status, either 'active', 'completing', 'completed', or
'killed' | def setSwarmState(self, swarmId, newStatus):
assert (newStatus in ['active', 'completing', 'completed', 'killed'])
# Set the swarm status
swarmInfo = self._state['swarms'][swarmId]
if swarmInfo['status'] == newStatus:
return
# If some other worker noticed it as completed, setting it to completing
# is obviously old information....
if swarmInfo['status'] == 'completed' and newStatus == 'completing':
return
self._dirty = True
swarmInfo['status'] = newStatus
if newStatus == 'completed':
(modelId, errScore) = self._hsObj._resultsDB.bestModelIdAndErrScore(swarmId)
swarmInfo['bestModelId'] = modelId
swarmInfo['bestErrScore'] = errScore
# If no longer active, remove it from the activeSwarms entry
if newStatus != 'active' and swarmId in self._state['activeSwarms']:
self._state['activeSwarms'].remove(swarmId)
# If new status is 'killed', kill off any running particles in that swarm
if newStatus=='killed':
self._hsObj.killSwarmParticles(swarmId)
# In case speculative particles are enabled, make sure we generate a new
# swarm at this time if all of the swarms in the current sprint have
# completed. This will ensure that we don't mark the sprint as completed
# before we've created all the possible swarms.
sprintIdx = swarmInfo['sprintIdx']
self.isSprintActive(sprintIdx)
# Update the sprint status. Check all the swarms that belong to this sprint.
# If they are all completed, the sprint is completed.
sprintInfo = self._state['sprints'][sprintIdx]
statusCounts = dict(active=0, completing=0, completed=0, killed=0)
bestModelIds = []
bestErrScores = []
for info in self._state['swarms'].itervalues():
if info['sprintIdx'] != sprintIdx:
continue
statusCounts[info['status']] += 1
if info['status'] == 'completed':
bestModelIds.append(info['bestModelId'])
bestErrScores.append(info['bestErrScore'])
if statusCounts['active'] > 0:
sprintStatus = 'active'
elif statusCounts['completing'] > 0:
sprintStatus = 'completing'
else:
sprintStatus = 'completed'
sprintInfo['status'] = sprintStatus
# If the sprint is complete, get the best model from all of its swarms and
# store that as the sprint best
if sprintStatus == 'completed':
if len(bestErrScores) > 0:
whichIdx = numpy.array(bestErrScores).argmin()
sprintInfo['bestModelId'] = bestModelIds[whichIdx]
sprintInfo['bestErrScore'] = bestErrScores[whichIdx]
else:
# This sprint was empty, most likely because all particles were
# killed. Give it a huge error score
sprintInfo['bestModelId'] = 0
sprintInfo['bestErrScore'] = numpy.inf
# See if our best err score got NO BETTER as compared to a previous
# sprint. If so, stop exploring subsequent sprints (lastGoodSprint
# is no longer None).
bestPrior = numpy.inf
for idx in range(sprintIdx):
if self._state['sprints'][idx]['status'] == 'completed':
(_, errScore) = self.bestModelInCompletedSprint(idx)
if errScore is None:
errScore = numpy.inf
else:
errScore = numpy.inf
if errScore < bestPrior:
bestPrior = errScore
if sprintInfo['bestErrScore'] >= bestPrior:
self._state['lastGoodSprint'] = sprintIdx-1
# If ALL sprints up to the last good one are done, the search is now over
if self._state['lastGoodSprint'] is not None \
and not self.anyGoodSprintsActive():
self._state['searchOver'] = True | 108,472 |
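The sprint status rollup used above can be restated in isolation: any active swarm keeps the sprint active; failing that, any completing swarm makes it completing; otherwise it is completed. A self-contained restatement for clarity (the helper name sprintStatusFromCounts is ours, not from the source):

def sprintStatusFromCounts(statusCounts):
  # Mirrors the rollup logic in setSwarmState()
  if statusCounts['active'] > 0:
    return 'active'
  if statusCounts['completing'] > 0:
    return 'completing'
  return 'completed'

assert sprintStatusFromCounts(
    dict(active=0, completing=2, completed=3, killed=1)) == 'completing'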
Recursively applies f to the values in dict d.
Args:
d: The dict to recurse over.
f: A function to apply to values in d that takes the value and a list of
keys from the root of the dict to the value. | def rApply(d, f):
remainingDicts = [(d, ())]
while len(remainingDicts) > 0:
current, prevKeys = remainingDicts.pop()
for k, v in current.iteritems():
keys = prevKeys + (k,)
if isinstance(v, dict):
remainingDicts.insert(0, (v, keys))
else:
f(v, keys) | 108,522 |
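For example, rApply can print every leaf of a nested dict along with its key path (printLeaf is an illustrative callback; visit order depends on dict ordering):

def printLeaf(value, keys):
  print '.'.join(keys), '=', value

rApply({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}, printLeaf)
# Prints lines such as:  a = 1,  b.c = 2,  b.d.e = 3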
Do one iteration of inference and/or learning and return the result
Parameters:
--------------------------------------------
rfInput: Input vector. Shape is: (1, inputVectorLen).
resetSignal: True if reset is asserted | def _doBottomUpCompute(self, rfInput, resetSignal):
# Conditional compute break
self._conditionalBreak()
# Save the rfInput for the spInputNonZeros parameter
self._spatialPoolerInput = rfInput.reshape(-1)
assert(rfInput.shape[0] == 1)
# Run inference using the spatial pooler. We learn on the coincidences only
# if we are in learning mode and trainingStep is set appropriately.
# Run SFDR bottom-up compute and cache output in self._spatialPoolerOutput
inputVector = numpy.array(rfInput[0]).astype('uint32')
outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32')
self._sfdr.compute(inputVector, self.learningMode, outputVector)
self._spatialPoolerOutput[:] = outputVector[:]
# Direct logging of SP outputs if requested
if self._fpLogSP:
output = self._spatialPoolerOutput.reshape(-1)
outputNZ = output.nonzero()[0]
outStr = " ".join(["%d" % int(token) for token in outputNZ])
print >>self._fpLogSP, output.size, outStr
# Direct logging of SP inputs
if self._fpLogSPInput:
output = rfInput.reshape(-1)
outputNZ = output.nonzero()[0]
outStr = " ".join(["%d" % int(token) for token in outputNZ])
print >>self._fpLogSPInput, output.size, outStr
return self._spatialPoolerOutput | 108,582 |
Return (isInt, intValue) for a given floating point number.
Parameters:
----------------------------------------------------------------------
x: floating point number to evaluate
precision: desired precision
retval: (isInt, intValue)
isInt: True if x is close enough to an integer value
intValue: x as an integer | def _isInt(x, precision = 0.0001):
xInt = int(round(x))
# Compare against precision * abs(x) so negative (and zero) inputs work too
return (abs(x - xInt) <= precision * abs(x), xInt) | 108,644 |
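Usage, with the default relative precision:

print _isInt(3.00002)  # (True, 3): within 0.0001 * 3.00002 of an integer
print _isInt(3.1)      # (False, 3)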
Returns the experiment description schema. This implementation loads it in
from file experimentDescriptionSchema.json.
Parameters:
--------------------------------------------------------------------------
Returns: returns a dict representing the experiment description schema. | def _getExperimentDescriptionSchema():
installPath = os.path.dirname(os.path.abspath(__file__))
schemaFilePath = os.path.join(installPath, "experimentDescriptionSchema.json")
with open(schemaFilePath, 'r') as schemaFile:
return json.loads(schemaFile.read()) | 108,654 |
Generates the Metrics for a given InferenceType
Parameters:
-------------------------------------------------------------------------
options: ExpGenerator options
retval: (metricsList, optimizeMetricLabel)
metricsList: list of metric string names
optimizeMetricLabel: Name of the metric which to optimize over | def _generateMetricSpecs(options):
inferenceType = options['inferenceType']
inferenceArgs = options['inferenceArgs']
predictionSteps = inferenceArgs['predictionSteps']
metricWindow = options['metricWindow']
if metricWindow is None:
metricWindow = int(Configuration.get("nupic.opf.metricWindow"))
metricSpecStrings = []
optimizeMetricLabel = ""
# -----------------------------------------------------------------------
# Generate the metrics specified by the expGenerator parameters
metricSpecStrings.extend(_generateExtraMetricSpecs(options))
# -----------------------------------------------------------------------
optimizeMetricSpec = None
# If using a dynamically computed prediction steps (i.e. when swarming
# over aggregation is requested), then we will plug in the variable
# predictionSteps in place of the statically provided predictionSteps
# from the JSON description.
if options['dynamicPredictionSteps']:
assert len(predictionSteps) == 1
predictionSteps = ['$REPLACE_ME']
# -----------------------------------------------------------------------
# Metrics for temporal prediction
if inferenceType in (InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep,
InferenceType.NontemporalClassification,
'MultiStep'):
predictedFieldName, predictedFieldType = _getPredictedField(options)
isCategory = _isCategory(predictedFieldType)
metricNames = ('avg_err',) if isCategory else ('aae', 'altMAPE')
trivialErrorMetric = 'avg_err' if isCategory else 'altMAPE'
oneGramErrorMetric = 'avg_err' if isCategory else 'altMAPE'
movingAverageBaselineName = 'moving_mode' if isCategory else 'moving_mean'
# Multi-step metrics
for metricName in metricNames:
metricSpec, metricLabel = \
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.multiStepBestPredictions,
metric='multiStep',
params={'errorMetric': metricName,
'window':metricWindow,
'steps': predictionSteps},
returnLabel=True)
metricSpecStrings.append(metricSpec)
# If the custom error metric was specified, add that
if options["customErrorMetric"] is not None :
metricParams = dict(options["customErrorMetric"])
metricParams['errorMetric'] = 'custom_error_metric'
metricParams['steps'] = predictionSteps
# If errorWindow is not specified, make it equal to the default window
if not "errorWindow" in metricParams:
metricParams["errorWindow"] = metricWindow
metricSpec, metricLabel =_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.multiStepPredictions,
metric="multiStep",
params=metricParams,
returnLabel=True)
metricSpecStrings.append(metricSpec)
# If this is the first specified step size, optimize for it. Be sure to
# escape special characters since this is a regular expression
optimizeMetricSpec = metricSpec
metricLabel = metricLabel.replace('[', '\\[')
metricLabel = metricLabel.replace(']', '\\]')
optimizeMetricLabel = metricLabel
if options["customErrorMetric"] is not None :
optimizeMetricLabel = ".*custom_error_metric.*"
# Add in the trivial metrics
if options["runBaselines"] \
and inferenceType != InferenceType.NontemporalClassification:
for steps in predictionSteps:
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric="trivial",
params={'window':metricWindow,
"errorMetric":trivialErrorMetric,
'steps': steps})
)
##Add in the One-Gram baseline error metric
#metricSpecStrings.append(
# _generateMetricSpecString(field=predictedFieldName,
# inferenceElement=InferenceElement.encodings,
# metric="two_gram",
# params={'window':metricWindow,
# "errorMetric":oneGramErrorMetric,
# 'predictionField':predictedFieldName,
# 'steps': steps})
# )
#
#Include the baseline moving mean/mode metric
if isCategory:
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric=movingAverageBaselineName,
params={'window':metricWindow
,"errorMetric":"avg_err",
"mode_window":200,
"steps": steps})
)
else :
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric=movingAverageBaselineName,
params={'window':metricWindow
,"errorMetric":"altMAPE",
"mean_window":200,
"steps": steps})
)
# -----------------------------------------------------------------------
# Metrics for classification
elif inferenceType in (InferenceType.TemporalClassification,):
metricName = 'avg_err'
trivialErrorMetric = 'avg_err'
oneGramErrorMetric = 'avg_err'
movingAverageBaselineName = 'moving_mode'
optimizeMetricSpec, optimizeMetricLabel = \
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric=metricName,
params={'window':metricWindow},
returnLabel=True)
metricSpecStrings.append(optimizeMetricSpec)
if options["runBaselines"]:
# If temporal, generate the trivial predictor metric
if inferenceType == InferenceType.TemporalClassification:
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric="trivial",
params={'window':metricWindow,
"errorMetric":trivialErrorMetric})
)
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric="two_gram",
params={'window':metricWindow,
"errorMetric":oneGramErrorMetric})
)
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric=movingAverageBaselineName,
params={'window':metricWindow
,"errorMetric":"avg_err",
"mode_window":200})
)
# Custom Error Metric
if not options["customErrorMetric"] == None :
#If errorWindow is not specified, make it equal to the default window
if not "errorWindow" in options["customErrorMetric"]:
options["customErrorMetric"]["errorWindow"] = metricWindow
optimizeMetricSpec = _generateMetricSpecString(
inferenceElement=InferenceElement.classification,
metric="custom",
params=options["customErrorMetric"])
optimizeMetricLabel = ".*custom_error_metric.*"
metricSpecStrings.append(optimizeMetricSpec)
# -----------------------------------------------------------------------
# Plug in the predictionSteps variable for any dynamically generated
# prediction steps
if options['dynamicPredictionSteps']:
for i in range(len(metricSpecStrings)):
metricSpecStrings[i] = metricSpecStrings[i].replace(
"'$REPLACE_ME'", "predictionSteps")
optimizeMetricLabel = optimizeMetricLabel.replace(
"'$REPLACE_ME'", ".*")
return metricSpecStrings, optimizeMetricLabel | 108,657 |
Add noise to the given input.
Parameters:
-----------------------------------------------
input: the input to add noise to
noise: how much noise to add
doForeground: If true, turn off some of the 1 bits in the input
doBackground: If true, turn on some of the 0 bits in the input | def addNoise(input, noise=0.1, doForeground=True, doBackground=True):
if doForeground and doBackground:
return numpy.abs(input - (numpy.random.random(input.shape) < noise))
else:
if doForeground:
return numpy.logical_and(input, numpy.random.random(input.shape) > noise)
if doBackground:
return numpy.logical_or(input, numpy.random.random(input.shape) < noise)
return input | 108,706 |
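For example, on a small binary vector (note the single-sided branches return boolean arrays from numpy.logical_and/logical_or):

v = numpy.array([1, 1, 1, 0, 0, 0, 0, 0])
noisy = addNoise(v, noise=0.3)                       # flips ~30% of all bits
fgOnly = addNoise(v, noise=0.3, doBackground=False)  # only clears some 1 bits
bgOnly = addNoise(v, noise=0.3, doForeground=False)  # only sets some 0 bits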
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row. | def generateCoincMatrix(nCoinc=10, length=500, activity=50):
coincMatrix0 = SM32(int(nCoinc), int(length))
theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
for rowIdx in xrange(nCoinc):
coinc = numpy.array(random.sample(xrange(length),
activity), dtype=numpy.uint32)
coinc.sort()
coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
# This is the right code to use, it's faster, but it derails the unit
# testing of the pooling for now.
coincMatrix = SM32(int(nCoinc), int(length))
coincMatrix.initializeWithFixedNNZR(activity)
return coincMatrix0 | 108,707 |
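The same idea without nupic's SM32 sparse matrix, as a dense numpy-only sketch for illustration (the helper name denseCoincMatrix is ours; the real code keeps rows sparse):

import random
import numpy

def denseCoincMatrix(nCoinc=10, length=500, activity=50):
  # Each row gets 'activity' randomly chosen bits set to 1
  m = numpy.zeros((nCoinc, length), dtype=numpy.float32)
  for rowIdx in xrange(nCoinc):
    cols = random.sample(xrange(length), activity)
    m[rowIdx, cols] = 1.0
  return m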
Generate a non-overlapping coincidence matrix. This is used to generate random
inputs to the temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row. | def generateSimpleCoincMatrix(nCoinc=10, length=500, activity=50):
assert nCoinc*activity<=length, "can't generate non-overlapping coincidences"
coincMatrix = SM32(0, length)
coinc = numpy.zeros(length, dtype='int32')
for i in xrange(nCoinc):
coinc[:] = 0
coinc[i*activity:(i+1)*activity] = 1
coincMatrix.addRow(coinc)
return coincMatrix | 108,712 |
Convert a list of sequences of pattern indices, and a pattern lookup table,
into an array of patterns
Parameters:
-----------------------------------------------
seq: the sequence, given as indices into the patternMatrix
patternMatrix: a SparseMatrix containing the possible patterns used in
the sequence. | def vectorsFromSeqList(seqList, patternMatrix):
totalLen = 0
for seq in seqList:
totalLen += len(seq)
vectors = numpy.zeros((totalLen, patternMatrix.shape[1]), dtype='bool')
vecOffset = 0
for seq in seqList:
seq = numpy.array(seq, dtype='uint32')
for idx,coinc in enumerate(seq):
vectors[vecOffset] = patternMatrix.getRow(int(coinc))
vecOffset += 1
return vectors | 108,715 |
Returns 3 things for a vector:
* the total on time
* the number of runs
* a list of the durations of each run.
Parameters:
-----------------------------------------------
input stream: 11100000001100000000011111100000
return value: (11, 3, [3, 2, 6]) | def _listOfOnTimesInVec(vector):
# init counters
durations = []
numOnTimes = 0
totalOnTime = 0
# Find where the nonzeros are
nonzeros = numpy.array(vector).nonzero()[0]
# Nothing to do if vector is empty
if len(nonzeros) == 0:
return (0, 0, [])
# Special case of only 1 on bit
if len(nonzeros) == 1:
return (1, 1, [1])
# Count the consecutive non-zeros
prev = nonzeros[0]
onTime = 1
endIdx = nonzeros[-1]
for idx in nonzeros[1:]:
if idx != prev+1:
totalOnTime += onTime
numOnTimes += 1
durations.append(onTime)
onTime = 1
else:
onTime += 1
prev = idx
# Add in the last one
totalOnTime += onTime
numOnTimes += 1
durations.append(onTime)
return (totalOnTime, numOnTimes, durations) | 108,724 |
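The docstring example can be checked directly:

vec = [int(c) for c in "11100000001100000000011111100000"]
print _listOfOnTimesInVec(vec)  # (11, 3, [3, 2, 6])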
Returns the stability for the population averaged over multiple time steps
Parameters:
-----------------------------------------------
vectors: the vectors for which the stability is calculated
numSamples: the number of time steps where stability is counted
At each time step, count the fraction of the active elements which are stable
from the previous step.
Average all the fractions. | def populationStability(vectors, numSamples=None):
# ----------------------------------------------------------------------
# Calculate the stability
numVectors = len(vectors)
if numSamples is None:
numSamples = numVectors-1
countOn = range(numVectors-1)
else:
countOn = numpy.random.randint(0, numVectors-1, numSamples)
sigmap = 0.0
for i in countOn:
match = checkMatch(vectors[i], vectors[i+1], sparse=False)
# Ignore reset vectors (all 0's)
if match[1] != 0:
sigmap += float(match[0])/match[1]
return sigmap / numSamples | 108,730 |
Returns the percent of the outputs that remain completely stable over
N time steps.
Parameters:
-----------------------------------------------
vectors: the vectors for which the stability is calculated
numSamples: the number of time steps where stability is counted
For each window of numSamples, count how many outputs are active during
the entire window. | def percentOutputsStableOverNTimeSteps(vectors, numSamples=None):
# ----------------------------------------------------------------------
# Calculate the stability
totalSamples = len(vectors)
windowSize = numSamples
# Process each window
numWindows = 0
pctStable = 0
for wStart in range(0, totalSamples-windowSize+1):
# Count how many elements are active for the entire time
data = vectors[wStart:wStart+windowSize]
outputSums = data.sum(axis=0)
stableOutputs = (outputSums == windowSize).sum()
# Accumulate
samplePctStable = float(stableOutputs) / data[0].sum()
print samplePctStable
pctStable += samplePctStable
numWindows += 1
# Return percent average over all possible windows
return float(pctStable) / numWindows | 108,731 |
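A small worked example with hypothetical data (the function also prints each window's fraction as a side effect). With a window size of 2 there are two windows: in the first, 1 of the 2 initially-active outputs stays on (0.5); in the second, 1 of 1 does (1.0); the average is 0.75:

vectors = numpy.array([[1, 1, 0],
                       [1, 0, 0],
                       [1, 1, 0]])
print percentOutputsStableOverNTimeSteps(vectors, numSamples=2)  # 0.75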
Set the random seed and the numpy seed
Parameters:
--------------------------------------------------------------------
seed: random seed | def setSeed(self, seed):
rand.seed(seed)
np.random.seed(seed) | 108,788 |
Add multiple fields to the dataset.
Parameters:
-------------------------------------------------------------------
fieldsInfo: A list of dictionaries, containing a field name, specs for
the data classes and encoder params for the corresponding
field. | def addMultipleFields(self, fieldsInfo):
assert all(key in spec for spec in fieldsInfo
for key in ['name', 'fieldSpec', 'encoderParams'])
for spec in fieldsInfo:
self.addField(spec.pop('name'), spec.pop('fieldSpec'), spec.pop('encoderParams')) | 108,790 |
Initialize field using relevant encoder parameters.
Parameters:
-------------------------------------------------------------------
name: Field name
encoderParams: Parameters for the encoder.
Returns the index of the field | def defineField(self, name, encoderParams=None):
self.fields.append(_field(name, encoderParams))
return len(self.fields)-1 | 108,791 |
Set flag for field at index. Flags are special characters such as 'S' for
sequence or 'T' for timestamp.
Parameters:
--------------------------------------------------------------------
index: index of field whose flag is being set
flag: special character | def setFlag(self, index, flag):
assert len(self.fields)>index
self.fields[index].flag=flag | 108,792 |
Encode a record as a sparse distributed representation
Parameters:
--------------------------------------------------------------------
record: Record to be encoded
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields | def encodeRecord(self, record, toBeAdded=True):
encoding=[self.fields[i].encodeValue(record[i], toBeAdded) for i in \
xrange(len(self.fields))]
return encoding | 108,797 |
Encodes a list of records.
Parameters:
--------------------------------------------------------------------
records: One or more records. (i,j)th element of this 2D array
specifies the value at field j of record i.
If unspecified, records previously generated and stored are
used.
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields | def encodeAllRecords(self, records=None, toBeAdded=True):
if records is None:
records = self.getAllRecords()
if self.verbosity>0: print 'Encoding', len(records), 'records.'
encodings = [self.encodeRecord(record, toBeAdded) for record in records]
return encodings | 108,798 |
Add 'value' to the field i.
Parameters:
--------------------------------------------------------------------
value: value to be added
i: value is added to field i | def addValueToField(self, i, value=None):
assert(len(self.fields)>i)
if value is None:
value = self.fields[i].dataClass.getNext()
self.fields[i].addValue(value)
return value | 108,799 |
Export all the records into a csv file in numenta format.
Example header format:
fieldName1 fieldName2 fieldName3
date string float
T S
Parameters:
--------------------------------------------------------------------
path: Relative path of the file to which the records are to be exported | def saveRecords(self, path='myOutput'):
numRecords = self.fields[0].numRecords
assert (all(field.numRecords==numRecords for field in self.fields))
import csv
with open(path+'.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerow(self.getAllFieldNames())
writer.writerow(self.getAllDataTypes())
writer.writerow(self.getAllFlags())
writer.writerows(self.getAllRecords())
if self.verbosity>0:
print '******', numRecords,'records exported in numenta format to file:',\
path,'******\n' | 108,807 |
Instantiate our results database
Parameters:
--------------------------------------------------------------------
hsObj: Reference to the HypersearchV2 instance | def __init__(self, hsObj):
self._hsObj = hsObj
# This list holds all the results we have so far on every model. In
# addition, we maintain multiple other data structures which provide
# faster access into portions of this list
self._allResults = []
# Models that completed with errors and all completed.
# These are used to determine when we should abort because of too many
# errors
self._errModels = set()
self._numErrModels = 0
self._completedModels = set()
self._numCompletedModels = 0
# Map of the model ID to index of result in _allResults
self._modelIDToIdx = dict()
# The global best result on the optimize metric so far, and the model ID
self._bestResult = numpy.inf
self._bestModelID = None
# This is a dict of dicts. The top level dict has the swarmId as the key.
# Each entry is a dict of genIdx: (modelId, errScore) entries.
self._swarmBestOverall = dict()
# For each swarm, we keep track of how many particles we have per generation
# The key is the swarmId, the value is a list of the number of particles
# at each generation
self._swarmNumParticlesPerGeneration = dict()
# The following variables are used to support the
# getMaturedSwarmGenerations() call.
#
# The _modifiedSwarmGens set contains the set of (swarmId, genIdx) tuples
# that have had results reported to them since the last time
# getMaturedSwarmGenerations() was called.
#
# The maturedSwarmGens contains (swarmId,genIdx) tuples, one for each
# swarm generation index which we have already detected has matured. This
# insures that if by chance we get a rogue report from a model in a swarm
# generation index which we have already assumed was matured that we won't
# report on it again.
self._modifiedSwarmGens = set()
self._maturedSwarmGens = set()
# For each particle, we keep track of its best score (across all
# generations) and the position it was at when it got that score. The keys
# in this dict are the particleId, the values are (bestResult, position),
# where position is a dict with varName:position items in it.
self._particleBest = dict()
# For each particle, we keep track of its latest generation index.
self._particleLatestGenIdx = dict()
# For each swarm, we keep track of which models are in it. The key
# is the swarmId, the value is a list of indexes into self._allResults.
self._swarmIdToIndexes = dict()
# ParamsHash to index mapping
self._paramsHashToIndexes = dict() | 108,829 |
Return the modelID of the model with the given paramsHash, or
None if not found.
Parameters:
---------------------------------------------------------------------
paramsHash: paramsHash to look for
retval: modelId, or None if not found | def getModelIDFromParamsHash(self, paramsHash):
entryIdx = self._paramsHashToIndexes.get(paramsHash, None)
if entryIdx is not None:
return self._allResults[entryIdx]['modelID']
else:
return None | 108,831 |
Return particle info for a specific modelId.
Parameters:
---------------------------------------------------------------------
modelId: which model Id
retval: (particleState, modelId, errScore, completed, matured) | def getParticleInfo(self, modelId):
entry = self._allResults[self._modelIDToIdx[modelId]]
return (entry['modelParams']['particleState'], modelId, entry['errScore'],
entry['completed'], entry['matured']) | 108,834 |
Return a list of swarm generations that have completed and the
best (minimal) errScore seen for each of them.
Parameters:
---------------------------------------------------------------------
retval: list of tuples. Each tuple is of the form:
(swarmId, genIdx, bestErrScore) | def getMaturedSwarmGenerations(self):
# Return results go in this list
result = []
# For each of the swarm generations which have had model result updates
# since the last time we were called, see which have completed.
modifiedSwarmGens = sorted(self._modifiedSwarmGens)
# Walk through them in order from lowest to highest generation index
for key in modifiedSwarmGens:
(swarmId, genIdx) = key
# Skip it if we've already reported on it. This should happen rarely, if
# ever. It means that some worker has started and completed a model in
# this generation after we've determined that the generation has ended.
if key in self._maturedSwarmGens:
self._modifiedSwarmGens.remove(key)
continue
# If the previous generation for this swarm is not complete yet, don't
# bother evaluating this one.
if (genIdx >= 1) and not (swarmId, genIdx-1) in self._maturedSwarmGens:
continue
# We found a swarm generation that had some results reported since last
# time, see if it's complete or not
(_, _, errScores, completedFlags, maturedFlags) = \
self.getParticleInfos(swarmId, genIdx)
maturedFlags = numpy.array(maturedFlags)
numMatured = maturedFlags.sum()
if numMatured >= self._hsObj._minParticlesPerSwarm \
and numMatured == len(maturedFlags):
errScores = numpy.array(errScores)
bestScore = errScores.min()
self._maturedSwarmGens.add(key)
self._modifiedSwarmGens.remove(key)
result.append((swarmId, genIdx, bestScore))
# Return results
return result | 108,837 |
If there are any models that haven't been updated in a while, consider
them dead, and mark them as hidden in our resultsDB. We also change the
paramsHash and particleHash of orphaned models so that we can
re-generate that particle and/or model again if we desire.
Parameters:
----------------------------------------------------------------------
retval: nothing | def _checkForOrphanedModels(self):
self.logger.debug("Checking for orphaned models older than %s" % \
(self._modelOrphanIntervalSecs))
while True:
orphanedModelId = self._cjDAO.modelAdoptNextOrphan(self._jobID,
self._modelOrphanIntervalSecs)
if orphanedModelId is None:
return
self.logger.info("Removing orphaned model: %d" % (orphanedModelId))
# Change the model hash and params hash as stored in the models table so
# that we can insert a new model with the same paramsHash
for attempt in range(100):
paramsHash = hashlib.md5("OrphanParams.%d.%d" % (orphanedModelId,
attempt)).digest()
particleHash = hashlib.md5("OrphanParticle.%d.%d" % (orphanedModelId,
attempt)).digest()
try:
self._cjDAO.modelSetFields(orphanedModelId,
dict(engParamsHash=paramsHash,
engParticleHash=particleHash))
success = True
except Exception:
success = False
if success:
break
if not success:
raise RuntimeError("Unexpected failure to change paramsHash and "
"particleHash of orphaned model")
# Mark this model as complete, with reason "orphaned"
self._cjDAO.modelSetCompleted(modelID=orphanedModelId,
completionReason=ClientJobsDAO.CMPL_REASON_ORPHAN,
completionMsg="Orphaned")
# Update our results DB immediately, rather than wait for the worker
# to inform us. This ensures that the getParticleInfos() calls we make
# below don't include this particle. Setting the metricResult to None
# sets it to worst case
self._resultsDB.update(modelID=orphanedModelId,
modelParams=None,
modelParamsHash=paramsHash,
metricResult=None,
completed = True,
completionReason = ClientJobsDAO.CMPL_REASON_ORPHAN,
matured = True,
numRecords = 0) | 108,844 |
Back up a file
Parameters:
----------------------------------------------------------------------
retval: Filepath of the back-up | def _backupFile(filePath):
assert os.path.exists(filePath)
stampNum = 0
(prefix, suffix) = os.path.splitext(filePath)
while True:
backupPath = "%s.%d%s" % (prefix, stampNum, suffix)
stampNum += 1
if not os.path.exists(backupPath):
break
shutil.copyfile(filePath, backupPath)
return backupPath | 108,864 |
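For example, repeated backups of the same file get increasing stamp numbers inserted before the extension (the temp directory setup is just scaffolding for the demo):

import os
import tempfile

workDir = tempfile.mkdtemp()
path = os.path.join(workDir, "report.csv")
with open(path, "w") as f:
  f.write("data\n")
print _backupFile(path)  # e.g. .../report.0.csv
print _backupFile(path)  # e.g. .../report.1.csv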
Pick up the latest search from a saved jobID and monitor it to completion
Parameters:
----------------------------------------------------------------------
retval: nothing | def pickupSearch(self):
self.__searchJob = self.loadSavedHyperSearchJob(
permWorkDir=self._options["permWorkDir"],
outputLabel=self._options["outputLabel"])
self.monitorSearchJob() | 108,867 |
Launch worker processes to execute the given command line
Parameters:
-----------------------------------------------
cmdLine: The command line for each worker
numWorkers: number of workers to launch | def _launchWorkers(self, cmdLine, numWorkers):
self._workers = []
for i in range(numWorkers):
stdout = tempfile.NamedTemporaryFile(delete=False)
stderr = tempfile.NamedTemporaryFile(delete=False)
p = subprocess.Popen(cmdLine, bufsize=1, env=os.environ, shell=True,
stdin=None, stdout=stdout, stderr=stderr)
p._stderr_file = stderr
p._stdout_file = stdout
self._workers.append(p) | 108,869 |
Starts HyperSearch as a worker or runs it inline for the "dryRun" action
Parameters:
----------------------------------------------------------------------
retval: the new _HyperSearchJob instance representing the
HyperSearch job | def __startSearch(self):
# This search uses a pre-existing permutations script
params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,
forRunning=True)
if self._options["action"] == "dryRun":
args = [sys.argv[0], "--params=%s" % (json.dumps(params))]
print
print "=================================================================="
print "RUNNING PERMUTATIONS INLINE as \"DRY RUN\"..."
print "=================================================================="
jobID = hypersearch_worker.main(args)
else:
cmdLine = _setUpExports(self._options["exports"])
# Begin the new search. The {JOBID} string is replaced by the actual
# jobID returned from jobInsert.
cmdLine += "$HYPERSEARCH"
maxWorkers = self._options["maxWorkers"]
jobID = self.__cjDAO.jobInsert(
client="GRP",
cmdLine=cmdLine,
params=json.dumps(params),
minimumWorkers=1,
maximumWorkers=maxWorkers,
jobType=self.__cjDAO.JOB_TYPE_HS)
cmdLine = "python -m nupic.swarming.hypersearch_worker" \
" --jobID=%d" % (jobID)
self._launchWorkers(cmdLine, maxWorkers)
searchJob = _HyperSearchJob(jobID)
# Save search ID to file (this is used for report generation)
self.__saveHyperSearchJobID(
permWorkDir=self._options["permWorkDir"],
outputLabel=self._options["outputLabel"],
hyperSearchJob=searchJob)
if self._options["action"] == "dryRun":
print "Successfully executed \"dry-run\" hypersearch, jobID=%d" % (jobID)
else:
print "Successfully submitted new HyperSearch job, jobID=%d" % (jobID)
_emit(Verbosity.DEBUG,
"Each worker executing the command line: %s" % (cmdLine,))
return searchJob | 108,870 |
Instantiates a _HyperSearchJob instance from info saved in file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: _HyperSearchJob instance; raises exception if not found | def loadSavedHyperSearchJob(cls, permWorkDir, outputLabel):
jobID = cls.__loadHyperSearchJobID(permWorkDir=permWorkDir,
outputLabel=outputLabel)
searchJob = _HyperSearchJob(nupicJobID=jobID)
return searchJob | 108,872 |
Saves the given _HyperSearchJob instance's jobID to file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
hyperSearchJob: _HyperSearchJob instance
retval: nothing | def __saveHyperSearchJobID(cls, permWorkDir, outputLabel, hyperSearchJob):
jobID = hyperSearchJob.getJobID()
filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
outputLabel=outputLabel)
if os.path.exists(filePath):
_backupFile(filePath)
d = dict(hyperSearchJobID = jobID)
with open(filePath, "wb") as jobIdPickleFile:
pickle.dump(d, jobIdPickleFile) | 108,873 |
Loads a saved jobID from file
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: HyperSearch jobID; raises exception if not found. | def __loadHyperSearchJobID(cls, permWorkDir, outputLabel):
filePath = cls.__getHyperSearchJobIDFilePath(permWorkDir=permWorkDir,
outputLabel=outputLabel)
jobID = None
with open(filePath, "r") as jobIdPickleFile:
jobInfo = pickle.load(jobIdPickleFile)
jobID = jobInfo["hyperSearchJobID"]
return jobID | 108,874 |
Returns filepath where to store HyperSearch JobID
Parameters:
----------------------------------------------------------------------
permWorkDir: Directory path for saved jobID file
outputLabel: Label string for incorporating into file name for saved jobID
retval: Filepath where to store HyperSearch JobID | def __getHyperSearchJobIDFilePath(cls, permWorkDir, outputLabel):
# Get the base path and figure out the path of the report file.
basePath = permWorkDir
# Form the name of the output csv file that will contain all the results
filename = "%s_HyperSearchJobID.pkl" % (outputLabel,)
filepath = os.path.join(basePath, filename)
return filepath | 108,875 |
Emit model info to csv file
Parameters:
----------------------------------------------------------------------
modelInfo: _NupicModelInfo instance
retval: nothing | def emit(self, modelInfo):
# Open/init csv file, if needed
if self.__csvFileObj is None:
# sets up self.__sortedVariableNames and self.__csvFileObj
self.__openAndInitCSVFile(modelInfo)
csv = self.__csvFileObj
# Emit model info row to report.csv
print >> csv, "%s, " % (self.__searchJobID),
print >> csv, "%s, " % (modelInfo.getModelID()),
print >> csv, "%s, " % (modelInfo.statusAsString()),
if modelInfo.isFinished():
print >> csv, "%s, " % (modelInfo.getCompletionReason()),
else:
print >> csv, "NA, ",
if not modelInfo.isWaitingToStart():
print >> csv, "%s, " % (modelInfo.getStartTime()),
else:
print >> csv, "NA, ",
if modelInfo.isFinished():
dateFormat = "%Y-%m-%d %H:%M:%S"
startTime = modelInfo.getStartTime()
endTime = modelInfo.getEndTime()
print >> csv, "%s, " % endTime,
st = datetime.strptime(startTime, dateFormat)
et = datetime.strptime(endTime, dateFormat)
print >> csv, "%s, " % (str((et - st).seconds)),
else:
print >> csv, "NA, ",
print >> csv, "NA, ",
print >> csv, "%s, " % str(modelInfo.getModelDescription()),
print >> csv, "%s, " % str(modelInfo.getNumRecords()),
paramLabelsDict = modelInfo.getParamLabels()
for key in self.__sortedVariableNames:
# Some values are complex structures,.. which need to be represented as
# strings
if key in paramLabelsDict:
print >> csv, "%s, " % (paramLabelsDict[key]),
else:
print >> csv, "None, ",
metrics = modelInfo.getReportMetrics()
for key in self.__sortedMetricsKeys:
value = metrics.get(key, "NA")
value = str(value)
value = value.replace("\n", " ")
print >> csv, "%s, " % (value),
print >> csv | 108,879 |
Close file and print report/backup csv file paths
Parameters:
----------------------------------------------------------------------
retval: nothing | def finalize(self):
if self.__csvFileObj is not None:
# Done with file
self.__csvFileObj.close()
self.__csvFileObj = None
print "Report csv saved in %s" % (self.__reportCSVPath,)
if self.__backupCSVPath:
print "Previous report csv file was backed up to %s" % \
(self.__backupCSVPath,)
else:
print "Nothing was written to report csv file." | 108,880 |
- Backs up old report csv file;
- opens the report csv file in append or overwrite mode (per
self.__replaceReport);
- emits column fields;
- sets up self.__sortedVariableNames, self.__csvFileObj,
self.__backupCSVPath, and self.__reportCSVPath
Parameters:
----------------------------------------------------------------------
modelInfo: First _NupicModelInfo instance passed to emit()
retval: nothing | def __openAndInitCSVFile(self, modelInfo):
# Get the base path and figure out the path of the report file.
basePath = self.__outputDirAbsPath
# Form the name of the output csv file that will contain all the results
reportCSVName = "%s_Report.csv" % (self.__outputLabel,)
reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)
# If a report CSV file already exists, back it up
backupCSVPath = None
if os.path.exists(reportCSVPath):
backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)
# Open report file
if self.__replaceReport:
mode = "w"
else:
mode = "a"
csv = self.__csvFileObj = open(reportCSVPath, mode)
# If we are appending, add some blank line separators
if not self.__replaceReport and backupCSVPath:
print >> csv
print >> csv
# Print the column names
print >> csv, "jobID, ",
print >> csv, "modelID, ",
print >> csv, "status, " ,
print >> csv, "completionReason, ",
print >> csv, "startTime, ",
print >> csv, "endTime, ",
print >> csv, "runtime(s), " ,
print >> csv, "expDesc, ",
print >> csv, "numRecords, ",
for key in self.__sortedVariableNames:
print >> csv, "%s, " % key,
for key in self.__sortedMetricsKeys:
print >> csv, "%s, " % key,
print >> csv | 108,881 |
_NupicJob constructor
Parameters:
----------------------------------------------------------------------
nupicJobID: Nupic Client JobID of the job | def __init__(self, nupicJobID):
self.__nupicJobID = nupicJobID
jobInfo = _clientJobsDB().jobInfo(nupicJobID)
assert jobInfo is not None, "jobID=%s not found" % nupicJobID
assert jobInfo.jobId == nupicJobID, "%s != %s" % (jobInfo.jobId, nupicJobID)
_emit(Verbosity.DEBUG, "_NupicJob: \n%s" % pprint.pformat(jobInfo, indent=4))
if jobInfo.params is not None:
self.__params = json.loads(jobInfo.params)
else:
self.__params = None | 108,882 |
Queries the DB for model IDs of all currently instantiated models
associated with this HyperSearch job.
See also: _iterModels()
Parameters:
----------------------------------------------------------------------
retval: A sequence of Nupic modelIDs | def queryModelIDs(self):
jobID = self.getJobID()
modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
modelIDs = tuple(x[0] for x in modelCounterPairs)
return modelIDs | 108,885 |
Unwraps self.__rawInfo.params into the equivalent python dictionary
and caches it in self.__cachedParams. Returns the unwrapped params
Parameters:
----------------------------------------------------------------------
retval: Model params dictionary corresponding to the json
as returned in ClientJobsDAO.modelsInfo()[x].params | def __unwrapParams(self):
if self.__cachedParams is None:
self.__cachedParams = json.loads(self.__rawInfo.params)
assert self.__cachedParams is not None, \
"%s resulted in None" % self.__rawInfo.params
return self.__cachedParams | 108,892 |
Retrieves a dictionary of metrics that combines all report and
optimization metrics
Parameters:
----------------------------------------------------------------------
retval: a dictionary of optimization metrics that were collected
for the model; an empty dictionary if there aren't any. | def getAllMetrics(self):
result = self.getReportMetrics()
result.update(self.getOptimizationMetrics())
return result | 108,893 |
Unwraps self.__rawInfo.results and caches it in self.__cachedResults;
Returns the unwrapped params
Parameters:
----------------------------------------------------------------------
retval: ModelResults namedtuple instance | def __unwrapResults(self):
if self.__cachedResults is None:
if self.__rawInfo.results is not None:
resultList = json.loads(self.__rawInfo.results)
assert len(resultList) == 2, \
"Expected 2 elements, but got %s (%s)." % (
len(resultList), resultList)
self.__cachedResults = self.ModelResults(
reportMetrics=resultList[0],
optimizationMetrics=resultList[1])
else:
self.__cachedResults = self.ModelResults(
reportMetrics={},
optimizationMetrics={})
return self.__cachedResults | 108,894 |
Returns the periodic checks to see if the model should
continue running.
Parameters:
-----------------------------------------------------------------------
terminationFunc: The function that will be called in the model main loop
as a wrapper around this function. Must have a parameter
called 'index'
Returns: A list of PeriodicActivityRequest objects. | def getTerminationCallbacks(self, terminationFunc):
activities = [None] * len(ModelTerminator._MILESTONES)
for index, (iteration, _) in enumerate(ModelTerminator._MILESTONES):
cb = functools.partial(terminationFunc, index=index)
activities[index] = PeriodicActivityRequest(repeating=False,
period=iteration,
cb=cb)
return activities | 108,904 |
Tell the writer which metrics should be written
Parameters:
-----------------------------------------------------------------------
metricNames: A list of metric labels to be written | def setLoggedMetrics(self, metricNames):
if metricNames is None:
self.__metricNames = set([])
else:
self.__metricNames = set(metricNames) | 109,057 |
[virtual method override] Save a checkpoint of the prediction output
stream. The checkpoint comprises up to maxRows of the most recent inference
records.
Parameters:
----------------------------------------------------------------------
checkpointSink: A File-like object where predictions checkpoint data, if
any, will be stored.
maxRows: Maximum number of most recent inference rows
to checkpoint. | def checkpoint(self, checkpointSink, maxRows):
checkpointSink.truncate()
if self.__dataset is None:
if self.__checkpointCache is not None:
self.__checkpointCache.seek(0)
shutil.copyfileobj(self.__checkpointCache, checkpointSink)
checkpointSink.flush()
return
else:
# Nothing to checkpoint
return
self.__dataset.flush()
totalDataRows = self.__dataset.getDataRowCount()
if totalDataRows == 0:
# Nothing to checkpoint
return
# Open reader of prediction file (suppress missingValues conversion)
reader = FileRecordStream(self.__datasetPath, missingValues=[])
# Create CSV writer for writing checkpoint rows
writer = csv.writer(checkpointSink)
# Write the header row to checkpoint sink -- just field names
writer.writerow(reader.getFieldNames())
# Determine number of rows to checkpoint
numToWrite = min(maxRows, totalDataRows)
# Skip initial rows to get to the rows that we actually need to checkpoint
numRowsToSkip = totalDataRows - numToWrite
for i in xrange(numRowsToSkip):
reader.next()
# Write the data rows to checkpoint sink
numWritten = 0
while True:
row = reader.getNextRecord()
if row is None:
break
row = [str(element) for element in row]
#print "DEBUG: _BasicPredictionWriter: checkpointing row: %r" % (row,)
writer.writerow(row)
numWritten +=1
assert numWritten == numToWrite, \
"numWritten (%s) != numToWrite (%s)" % (numWritten, numToWrite)
checkpointSink.flush()
return | 109,061 |
Generates a set of input records
Params:
numRecords - how many records to generate
elemSize - the size of each record (num 0s or 1s)
numSet - how many 1s in each record
Returns: a list of inputs | def generateRandomInput(numRecords, elemSize = 400, numSet = 42):
inputs = []
for _ in xrange(numRecords):
input = np.zeros(elemSize, dtype=realDType)
for _ in range(0,numSet):
ind = np.random.random_integers(0, elemSize-1, 1)[0]
input[ind] = 1
while abs(input.sum() - numSet) > 0.1:
ind = np.random.random_integers(0, elemSize-1, 1)[0]
input[ind] = 1
inputs.append(input)
return inputs | 109,078 |
Stores the current model results in the manager's internal store
Parameters:
-----------------------------------------------------------------------
results: A ModelResults object that contains the current timestep's
input/inferences | def _addResults(self, results):
# -----------------------------------------------------------------------
# If the model potentially has temporal inferences.
if self.__isTemporal:
shiftedInferences = self.__inferenceShifter.shift(results).inferences
self.__currentResult = copy.deepcopy(results)
self.__currentResult.inferences = shiftedInferences
self.__currentInference = shiftedInferences
# -----------------------------------------------------------------------
# The current model has no temporal inferences.
else:
self.__currentResult = copy.deepcopy(results)
self.__currentInference = copy.deepcopy(results.inferences)
# -----------------------------------------------------------------------
# Save the current ground-truth results
self.__currentGroundTruth = copy.deepcopy(results) | 109,110 |
Get the actual value for this field
Parameters:
-----------------------------------------------------------------------
inferenceElement: The inference element (part of the inference) that
is being used for this metric | def _getGroundTruth(self, inferenceElement):
sensorInputElement = InferenceElement.getInputElement(inferenceElement)
if sensorInputElement is None:
return None
return getattr(self.__currentGroundTruth.sensorInput, sensorInputElement) | 109,111 |
Creates the required metrics modules
Parameters:
-----------------------------------------------------------------------
metricSpecs:
A sequence of MetricSpec objects that specify which metric modules to
instantiate | def __constructMetricsModules(self, metricSpecs):
if not metricSpecs:
return
self.__metricSpecs = metricSpecs
for spec in metricSpecs:
if not InferenceElement.validate(spec.inferenceElement):
raise ValueError("Invalid inference element for metric spec: %r" %spec)
self.__metrics.append(metrics.getModule(spec))
self.__metricLabels.append(spec.getLabel()) | 109,112 |
Generates the ClientJobs database name for the given version of the
database
Parameters:
----------------------------------------------------------------
dbVersion: ClientJobs database version number
retval: the ClientJobs database name for the given DB version | def __getDBNameForVersion(cls, dbVersion):
# DB Name prefix for the given version
prefix = cls.__getDBNamePrefixForVersion(dbVersion)
# DB Name suffix
suffix = Configuration.get('nupic.cluster.database.nameSuffix')
# Replace dashes and dots with underscores (e.g. 'ec2-user' or 'ec2.user' would break SQL)
suffix = suffix.replace("-", "_")
suffix = suffix.replace(".", "_")
# Create the name of the database for the given DB version
dbName = '%s_%s' % (prefix, suffix)
return dbName | 109,139 |
Get the instance of the ClientJobsDAO created for this process (or
perhaps at some point in the future, for this thread).
Parameters:
----------------------------------------------------------------
retval: instance of ClientJobsDAO | def get():
# Instantiate if needed
if ClientJobsDAO._instance is None:
cjDAO = ClientJobsDAO()
cjDAO.connect()
ClientJobsDAO._instance = cjDAO
# Return the instance to the caller
return ClientJobsDAO._instance | 109,140 |
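A usage sketch of the singleton accessor; repeated calls within a process return the same connected instance:
cjDAO = ClientJobsDAO.get()          # constructs and connects on first call
assert cjDAO is ClientJobsDAO.get()  # later calls reuse the same instance
jobID = cjDAO.jobStartNext()         # e.g. the scheduler claiming a queued job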
Instantiate a ClientJobsDAO instance.
Parameters:
---------------------------------------------------------------- | def __init__(self):
self._logger = _LOGGER
# Usage error to instantiate more than 1 instance per process
assert (ClientJobsDAO._instance is None)
# Create the name of the current version database
self.dbName = self._getDBName()
# NOTE: we set the table names here; the rest of the table info is set when
# the tables are initialized during connect()
self._jobs = self._JobsTableInfo()
self._jobs.tableName = '%s.jobs' % (self.dbName)
self._models = self._ModelsTableInfo()
self._models.tableName = '%s.models' % (self.dbName)
# Our connection ID, filled in during connect()
self._connectionID = None | 109,141 |
Convert a database internal column name to a public name. This
takes something of the form word1_word2_word3 and converts it to:
word1Word2Word3. If the db field name starts with '_', it is stripped out
so that the name is compatible with collections.namedtuple.
for example: _word1_word2_word3 => word1Word2Word3
Parameters:
--------------------------------------------------------------
dbName: database internal field name
retval: public name | def _columnNameDBToPublic(self, dbName):
words = dbName.split('_')
if dbName.startswith('_'):
words = words[1:]
pubWords = [words[0]]
for word in words[1:]:
pubWords.append(word[0].upper() + word[1:])
return ''.join(pubWords) | 109,142 |
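Two worked examples of the conversion; 'dao' is an illustrative ClientJobsDAO instance name:
print dao._columnNameDBToPublic('_eng_last_update_time')  # 'engLastUpdateTime'
print dao._columnNameDBToPublic('job_id')                 # 'jobId'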
Initialize tables, if needed
Parameters:
----------------------------------------------------------------
cursor: SQL cursor
deleteOldVersions: if true, delete any old versions of the DB left
on the server
recreate: if true, recreate the database from scratch even
if it already exists. | def _initTables(self, cursor, deleteOldVersions, recreate):
# Delete old versions if they exist
if deleteOldVersions:
self._logger.info(
"Dropping old versions of client_jobs DB; called from: %r",
traceback.format_stack())
for i in range(self._DB_VERSION):
cursor.execute('DROP DATABASE IF EXISTS %s' %
(self.__getDBNameForVersion(i),))
# Create the database if necessary
if recreate:
self._logger.info(
"Dropping client_jobs DB %r; called from: %r",
self.dbName, traceback.format_stack())
cursor.execute('DROP DATABASE IF EXISTS %s' % (self.dbName))
cursor.execute('CREATE DATABASE IF NOT EXISTS %s' % (self.dbName))
# Get the list of tables
cursor.execute('SHOW TABLES IN %s' % (self.dbName))
output = cursor.fetchall()
tableNames = [x[0] for x in output]
# ------------------------------------------------------------------------
# Create the jobs table if it doesn't exist
# Fields that start with '_eng' are intended for private use by the engine
# and should not be used by the UI
if 'jobs' not in tableNames:
self._logger.info("Creating table %r", self.jobsTableName)
fields = [
'job_id INT UNSIGNED NOT NULL AUTO_INCREMENT',
# unique jobID
'client CHAR(%d)' % (self.CLIENT_MAX_LEN),
# name of client (UI, StrmMgr, etc.)
'client_info LONGTEXT',
# Arbitrary data defined by the client
'client_key VARCHAR(255)',
# Foreign key as defined by the client.
'cmd_line LONGTEXT',
# command line to use to launch each worker process
'params LONGTEXT',
# JSON encoded params for the job, for use by the worker processes
'job_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
# unique hash of the job, provided by the client. Used for detecting
# identical job requests from the same client when they use the
# jobInsertUnique() method.
'status VARCHAR(16) DEFAULT "notStarted"',
# One of the STATUS_XXX enumerated value strings
'completion_reason VARCHAR(16)',
# One of the CMPL_REASON_XXX enumerated value strings.
# NOTE: This is the job completion reason according to the hadoop
# job-tracker. A success here does not necessarily mean the
# workers were "happy" with the job. To see if the workers
# failed, check the worker_completion_reason
'completion_msg LONGTEXT',
# Why this job completed, according to job-tracker
'worker_completion_reason VARCHAR(16) DEFAULT "%s"' % \
self.CMPL_REASON_SUCCESS,
# One of the CMPL_REASON_XXX enumerated value strings. This may
# be changed to CMPL_REASON_ERROR if any workers encounter
# an error while running the job.
'worker_completion_msg LONGTEXT',
# Why this job completed, according to workers. If
# worker_completion_reason is set to CMPL_REASON_ERROR, this will
# contain the error information.
'cancel BOOLEAN DEFAULT FALSE',
# set by UI, polled by engine
'start_time DATETIME DEFAULT NULL',
# When job started
'end_time DATETIME DEFAULT NULL',
# When job ended
'results LONGTEXT',
# JSON dict with general information about the results of the job,
# including the ID and value of the best model
# TODO: different semantics for results field of ProductionJob
'_eng_job_type VARCHAR(32)',
# String used to specify the type of job that this is. Current
# choices are hypersearch, production worker, or stream worker
'minimum_workers INT UNSIGNED DEFAULT 0',
# min number of desired workers at a time. If 0, no workers will be
# allocated in a crunch
'maximum_workers INT UNSIGNED DEFAULT 0',
# max number of desired workers at a time. If 0, then use as many
# as practical given load on the cluster.
'priority INT DEFAULT %d' % self.DEFAULT_JOB_PRIORITY,
# job scheduling priority; 0 is the default priority (
# ClientJobsDAO.DEFAULT_JOB_PRIORITY); positive values are higher
# priority (up to ClientJobsDAO.MAX_JOB_PRIORITY), and negative
# values are lower priority (down to ClientJobsDAO.MIN_JOB_PRIORITY)
'_eng_allocate_new_workers BOOLEAN DEFAULT TRUE',
# Should the scheduling algorithm allocate new workers to this job?
# If a specialized worker willingly gives up control, we set this
# field to FALSE to avoid allocating new workers.
'_eng_untended_dead_workers BOOLEAN DEFAULT FALSE',
# If a specialized worker fails or is killed by the scheduler, we
# set this field to TRUE to indicate that the worker is dead
'num_failed_workers INT UNSIGNED DEFAULT 0',
# The number of failed specialized workers for this job. If the
# number of failures is >= max.failed.attempts, we mark the job
# as failed
'last_failed_worker_error_msg LONGTEXT',
# Error message of the most recent specialized failed worker
'_eng_cleaning_status VARCHAR(16) DEFAULT "%s"' % \
self.CLEAN_NOT_DONE,
# Whether the job has been garbage collected; this includes removing
# unneeded model output caches and S3 checkpoints.
'gen_base_description LONGTEXT',
# The contents of the generated description.py file from hypersearch
# requests. This is generated by the Hypersearch workers and stored
# here for reference, debugging, and development purposes.
'gen_permutations LONGTEXT',
# The contents of the generated permutations.py file from
# hypersearch requests. This is generated by the Hypersearch workers
# and stored here for reference, debugging, and development
# purposes.
'_eng_last_update_time DATETIME DEFAULT NULL',
# time stamp of last update, used for detecting stalled jobs
'_eng_cjm_conn_id INT UNSIGNED',
# ID of the CJM starting up this job
'_eng_worker_state LONGTEXT',
# JSON encoded state of the hypersearch in progress, for private
# use by the Hypersearch workers
'_eng_status LONGTEXT',
# String used for status messages sent from the engine for
# informative purposes only. Usually printed periodically by
# clients watching a job progress.
'_eng_model_milestones LONGTEXT',
# JSON encoded object with information about global model milestone
# results
'PRIMARY KEY (job_id)',
'UNIQUE INDEX (client, job_hash)',
'INDEX (status)',
'INDEX (client_key)'
]
options = [
'AUTO_INCREMENT=1000',
]
query = 'CREATE TABLE IF NOT EXISTS %s (%s) %s' % \
(self.jobsTableName, ','.join(fields), ','.join(options))
cursor.execute(query)
# ------------------------------------------------------------------------
# Create the models table if it doesn't exist
# Fields that start with '_eng' are intended for private use by the engine
# and should not be used by the UI
if 'models' not in tableNames:
self._logger.info("Creating table %r", self.modelsTableName)
fields = [
'model_id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT',
# globally unique model ID
'job_id INT UNSIGNED NOT NULL',
# jobID
'params LONGTEXT NOT NULL',
# JSON encoded params for the model
'status VARCHAR(16) DEFAULT "notStarted"',
# One of the STATUS_XXX enumerated value strings
'completion_reason VARCHAR(16)',
# One of the CMPL_REASON_XXX enumerated value strings
'completion_msg LONGTEXT',
# Why this job completed
'results LONGTEXT DEFAULT NULL',
# JSON encoded structure containing metrics produced by the model
'optimized_metric FLOAT',
# Value of the particular metric we are optimizing in hypersearch
'update_counter INT UNSIGNED DEFAULT 0',
# incremented by engine every time the results is updated
'num_records INT UNSIGNED DEFAULT 0',
# number of records processed so far
'start_time DATETIME DEFAULT NULL',
# When this model started being evaluated
'end_time DATETIME DEFAULT NULL',
# When this model completed
'cpu_time FLOAT DEFAULT 0',
# How much actual CPU time was spent on this model, in seconds. This
# excludes time the process spent sleeping, or otherwise not
# actually executing code.
'model_checkpoint_id LONGTEXT',
# Checkpoint identifier for this model (after it has been saved)
'gen_description LONGTEXT',
# The contents of the generated description.py file from hypersearch
# requests. This is generated by the Hypersearch workers and stored
# here for reference, debugging, and development purposes.
'_eng_params_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
# MD5 hash of the params
'_eng_particle_hash BINARY(%d) DEFAULT NULL' % (self.HASH_MAX_LEN),
# MD5 hash of the particle info for PSO algorithm
'_eng_last_update_time DATETIME DEFAULT NULL',
# time stamp of last update, used for detecting stalled workers
'_eng_task_tracker_id TINYBLOB',
# Hadoop Task Tracker ID
'_eng_worker_id TINYBLOB',
# Hadoop Map Task ID
'_eng_attempt_id TINYBLOB',
# Hadoop Map task attempt ID
'_eng_worker_conn_id INT DEFAULT 0',
# database client connection ID of the worker that is running this
# model
'_eng_milestones LONGTEXT',
# A JSON encoded list of metric values for the model at each
# milestone point
'_eng_stop VARCHAR(16) DEFAULT NULL',
# One of the STOP_REASON_XXX enumerated value strings. Set either by
# the swarm terminator of either the current, or another
# Hypersearch worker.
'_eng_matured BOOLEAN DEFAULT FALSE',
# Set by the model maturity-checker when it decides that this model
# has "matured". This means that it has reached the point of
# not getting better results with more data.
'PRIMARY KEY (model_id)',
'UNIQUE INDEX (job_id, _eng_params_hash)',
'UNIQUE INDEX (job_id, _eng_particle_hash)',
]
options = [
'AUTO_INCREMENT=1000',
]
query = 'CREATE TABLE IF NOT EXISTS %s (%s) %s' % \
(self.modelsTableName, ','.join(fields), ','.join(options))
cursor.execute(query)
# ---------------------------------------------------------------------
# Get the field names for each table
cursor.execute('DESCRIBE %s' % (self.jobsTableName))
fields = cursor.fetchall()
self._jobs.dbFieldNames = [str(field[0]) for field in fields]
cursor.execute('DESCRIBE %s' % (self.modelsTableName))
fields = cursor.fetchall()
self._models.dbFieldNames = [str(field[0]) for field in fields]
# ---------------------------------------------------------------------
# Generate the public names
self._jobs.publicFieldNames = [self._columnNameDBToPublic(x)
for x in self._jobs.dbFieldNames]
self._models.publicFieldNames = [self._columnNameDBToPublic(x)
for x in self._models.dbFieldNames]
# ---------------------------------------------------------------------
# Generate the name conversion dicts
self._jobs.pubToDBNameDict = dict(
zip(self._jobs.publicFieldNames, self._jobs.dbFieldNames))
self._jobs.dbToPubNameDict = dict(
zip(self._jobs.dbFieldNames, self._jobs.publicFieldNames))
self._models.pubToDBNameDict = dict(
zip(self._models.publicFieldNames, self._models.dbFieldNames))
self._models.dbToPubNameDict = dict(
zip(self._models.dbFieldNames, self._models.publicFieldNames))
# ---------------------------------------------------------------------
# Generate the dynamic namedtuple classes we use
self._models.modelInfoNamedTuple = collections.namedtuple(
'_modelInfoNamedTuple', self._models.publicFieldNames)
self._jobs.jobInfoNamedTuple = collections.namedtuple(
'_jobInfoNamedTuple', self._jobs.publicFieldNames)
return | 109,144 |
For use only by the Nupic Scheduler (also known as ClientJobManager). Look
through the jobs table and see if any new job requests have been
queued up. If so, pick one and mark it as starting up and create the
model table to hold the results
Parameters:
----------------------------------------------------------------
retval: jobID of the job we are starting up, if found; None if not found | def jobStartNext(self):
# NOTE: cursor.execute('SELECT @update_id') trick is unreliable: if a
# connection loss occurs during cursor.execute, then the server-cached
# information is lost, and we cannot get the updated job ID; so, we use
# this select instead
row = self._getOneMatchingRowWithRetries(
self._jobs, dict(status=self.STATUS_NOTSTARTED), ['job_id'])
if row is None:
return None
(jobID,) = row
self._startJobWithRetries(jobID)
return jobID | 109,156 |
Look through the jobs table and count the running jobs whose
cancel field is true.
Parameters:
----------------------------------------------------------------
retval: A count of running jobs with the cancel field set to true. | def jobCountCancellingJobs(self,):
with ConnectionFactory.get() as conn:
query = 'SELECT COUNT(job_id) '\
'FROM %s ' \
'WHERE (status<>%%s AND cancel is TRUE)' \
% (self.jobsTableName,)
conn.cursor.execute(query, [self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return rows[0][0] | 109,160 |
Look through the jobs table and get the list of running jobs whose
cancel field is true.
Parameters:
----------------------------------------------------------------
retval: A (possibly empty) sequence of running job IDs with cancel field
set to true | def jobGetCancellingJobs(self,):
with ConnectionFactory.get() as conn:
query = 'SELECT job_id '\
'FROM %s ' \
'WHERE (status<>%%s AND cancel is TRUE)' \
% (self.jobsTableName,)
conn.cursor.execute(query, [self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return tuple(r[0] for r in rows) | 109,161 |
Generator to allow iterating slices at dynamic intervals
Parameters:
----------------------------------------------------------------
data: Any data structure that supports slicing (i.e. list or tuple)
intervals: Iterable of intervals. The sum of the intervals should be less
than, or equal to the length of data. | def partitionAtIntervals(data, intervals):
assert sum(intervals) <= len(data)
start = 0
for interval in intervals:
end = start + interval
yield data[start:end]
start = end | 109,162
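A usage sketch: one flat result row split into per-table chunks (the function is a generator, so wrap it in list() to materialize the slices):
row = ('a', 'b', 'c', 'd', 'e')
chunks = list(ClientJobsDAO.partitionAtIntervals(row, [2, 3]))
assert chunks == [('a', 'b'), ('c', 'd', 'e')]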
Return a list of namedtuples from the result of a join query. A
single database result is partitioned at intervals corresponding to the
fields in namedTuples. The return value is the result of applying
namedtuple._make() to each of the partitions, for each of the namedTuples.
Parameters:
----------------------------------------------------------------
result: Tuple representing a single result from a database query
*namedTuples: List of named tuples. | def _combineResults(result, *namedTuples):
results = ClientJobsDAO.partitionAtIntervals(
result, [len(nt._fields) for nt in namedTuples])
return [nt._make(result) for nt, result in zip(namedTuples, results)] | 109,163 |
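A sketch of combining a joined row, using two illustrative namedtuple types in place of the real jobs/models tuples:
import collections
JobTuple = collections.namedtuple('JobTuple', ['jobId', 'status'])
ModelTuple = collections.namedtuple('ModelTuple', ['modelId', 'numRecords'])
joined = (42, 'running', 1007, 1500)  # one row from a jobs-models SQL join
job, model = ClientJobsDAO._combineResults(joined, JobTuple, ModelTuple)
assert job == JobTuple(jobId=42, status='running')
assert model == ModelTuple(modelId=1007, numRecords=1500)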
Get all info about a job
Parameters:
----------------------------------------------------------------
job: jobID of the job to query
retval: namedtuple containing the job info. | def jobInfo(self, jobID):
row = self._getOneMatchingRowWithRetries(
self._jobs, dict(job_id=jobID),
[self._jobs.pubToDBNameDict[n]
for n in self._jobs.jobInfoNamedTuple._fields])
if row is None:
raise RuntimeError("jobID=%s not found within the jobs table" % (jobID))
# Create a namedtuple with the names to values
return self._jobs.jobInfoNamedTuple._make(row) | 109,165 |
Change the status on the given job
Parameters:
----------------------------------------------------------------
job: jobID of the job to change status
status: new status string (ClientJobsDAO.STATUS_xxxxx)
useConnectionID: True if the connection id of the calling function
must be the same as the connection that created the job. Set
to False for hypersearch workers | def jobSetStatus(self, jobID, status, useConnectionID=True,):
# Get a database connection and cursor
with ConnectionFactory.get() as conn:
query = 'UPDATE %s SET status=%%s, ' \
' _eng_last_update_time=UTC_TIMESTAMP() ' \
' WHERE job_id=%%s' \
% (self.jobsTableName,)
sqlParams = [status, jobID]
if useConnectionID:
query += ' AND _eng_cjm_conn_id=%s'
sqlParams.append(self._connectionID)
result = conn.cursor.execute(query, sqlParams)
if result != 1:
raise RuntimeError("Tried to change the status of job %d to %s, but "
"this job belongs to some other CJM" % (
jobID, status)) | 109,166 |
Change the status on the given job to completed
Parameters:
----------------------------------------------------------------
job: jobID of the job to mark as completed
completionReason: completionReason string
completionMsg: completionMsg string
useConnectionID: True if the connection id of the calling function
must be the same as the connection that created the job. Set
to False for hypersearch workers | def jobSetCompleted(self, jobID, completionReason, completionMsg,
useConnectionID = True):
# Get a database connection and cursor
with ConnectionFactory.get() as conn:
query = 'UPDATE %s SET status=%%s, ' \
' completion_reason=%%s, ' \
' completion_msg=%%s, ' \
' end_time=UTC_TIMESTAMP(), ' \
' _eng_last_update_time=UTC_TIMESTAMP() ' \
' WHERE job_id=%%s' \
% (self.jobsTableName,)
sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,
jobID]
if useConnectionID:
query += ' AND _eng_cjm_conn_id=%s'
sqlParams.append(self._connectionID)
result = conn.cursor.execute(query, sqlParams)
if result != 1:
raise RuntimeError("Tried to change the status of jobID=%s to "
"completed, but this job could not be found or "
"belongs to some other CJM" % (jobID)) | 109,167 |
Cancel the given job. This will update the cancel field in the
jobs table and will result in the job being cancelled.
Parameters:
----------------------------------------------------------------
jobID: jobID of the job to cancel | def jobCancel(self, jobID):
self._logger.info('Canceling jobID=%s', jobID)
# NOTE: jobSetFields does retries on transient mysql failures
self.jobSetFields(jobID, {"cancel" : True}, useConnectionID=False) | 109,168 |
Update the results string and last-update-time fields of a job.
Parameters:
----------------------------------------------------------------
jobID: job ID of model to modify
results: new results (json dict string) | def jobUpdateResults(self, jobID, results):
with ConnectionFactory.get() as conn:
query = 'UPDATE %s SET _eng_last_update_time=UTC_TIMESTAMP(), ' \
' results=%%s ' \
' WHERE job_id=%%s' % (self.jobsTableName,)
conn.cursor.execute(query, [results, jobID]) | 109,179 |
Delete all models from the models table
Parameters:
---------------------------------------------------------------- | def modelsClearAll(self):
self._logger.info('Deleting all rows from models table %r',
self.modelsTableName)
with ConnectionFactory.get() as conn:
query = 'DELETE FROM %s' % (self.modelsTableName)
conn.cursor.execute(query) | 109,180 |
Get ALL info for a set of models
WARNING!!!: The results are NOT necessarily returned in the same order as
the model IDs passed in!!!
Parameters:
----------------------------------------------------------------
modelIDs: list of model IDs
retval: list of nametuples containing all the fields stored for each
model. | def modelsInfo(self, modelIDs):
assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
"wrong modelIDs type: %s") % (type(modelIDs),)
assert modelIDs, "modelIDs is empty"
rows = self._getMatchingRowsWithRetries(
self._models, dict(model_id=modelIDs),
[self._models.pubToDBNameDict[f]
for f in self._models.modelInfoNamedTuple._fields])
results = [self._models.modelInfoNamedTuple._make(r) for r in rows]
# NOTE: this assertion will also fail if modelIDs contains duplicates
assert len(results) == len(modelIDs), "modelIDs not found: %s" % (
set(modelIDs) - set(r.modelId for r in results))
return results | 109,182 |
Get the params and paramsHash for a set of models.
WARNING!!!: The results are NOT necessarily returned in the same order as
the model IDs passed in!!!
Parameters:
----------------------------------------------------------------
modelIDs: list of model IDs
retval: list of result namedtuples defined in
ClientJobsDAO._models.getParamsNamedTuple. Each tuple
contains: (modelId, params, engParamsHash) | def modelsGetParams(self, modelIDs):
assert isinstance(modelIDs, self._SEQUENCE_TYPES), (
"Wrong modelIDs type: %r") % (type(modelIDs),)
assert len(modelIDs) >= 1, "modelIDs is empty"
rows = self._getMatchingRowsWithRetries(
self._models, {'model_id' : modelIDs},
[self._models.pubToDBNameDict[f]
for f in self._models.getParamsNamedTuple._fields])
# NOTE: assertion will also fail when modelIDs contains duplicates
assert len(rows) == len(modelIDs), "Didn't find modelIDs: %r" % (
(set(modelIDs) - set(r[0] for r in rows)),)
# Return the params and params hashes as a namedtuple
return [self._models.getParamsNamedTuple._make(r) for r in rows] | 109,187 |
Update the results string, and/or num_records fields of
a model. This will fail if the model does not currently belong to this
client (connection_id doesn't match).
Parameters:
----------------------------------------------------------------
modelID: model ID of model to modify
results: new results, or None to ignore
metricValue: the value of the metric being optimized, or None to ignore
numRecords: new numRecords, or None to ignore | def modelUpdateResults(self, modelID, results=None, metricValue =None,
numRecords=None):
assignmentExpressions = ['_eng_last_update_time=UTC_TIMESTAMP()',
'update_counter=update_counter+1']
assignmentValues = []
if results is not None:
assignmentExpressions.append('results=%s')
assignmentValues.append(results)
if numRecords is not None:
assignmentExpressions.append('num_records=%s')
assignmentValues.append(numRecords)
# NOTE1: (metricValue==metricValue) is False only when metricValue is NaN
# (NaN != NaN), so this guard skips NaN metric values
# NOTE2: metricValue is being passed as numpy.float64
if metricValue is not None and (metricValue==metricValue):
assignmentExpressions.append('optimized_metric=%s')
assignmentValues.append(float(metricValue))
query = 'UPDATE %s SET %s ' \
' WHERE model_id=%%s and _eng_worker_conn_id=%%s' \
% (self.modelsTableName, ','.join(assignmentExpressions))
sqlParams = assignmentValues + [modelID, self._connectionID]
# Get a database connection and cursor
with ConnectionFactory.get() as conn:
numRowsAffected = conn.cursor.execute(query, sqlParams)
if numRowsAffected != 1:
raise InvalidConnectionException(
("Tried to update the info of modelID=%r using connectionID=%r, but "
"this model belongs to some other worker or modelID not found; "
"numRowsAffected=%r") % (modelID,self._connectionID, numRowsAffected,)) | 109,190 |
Look through the models table for an orphaned model, which is a model
that is not completed yet, whose _eng_last_update_time is more than
maxUpdateInterval seconds ago.
If one is found, change its _eng_worker_conn_id to the current worker's
and return the model id.
Parameters:
----------------------------------------------------------------
jobId: jobID of the job whose orphaned models should be considered
maxUpdateInterval: maximum number of seconds since the last update before
a model is considered orphaned
retval: modelId of the model we adopted, or None if none found | def modelAdoptNextOrphan(self, jobId, maxUpdateInterval):
@g_retrySQL
def findCandidateModelWithRetries():
modelID = None
with ConnectionFactory.get() as conn:
# TODO: may need a table index on job_id/status for speed
query = 'SELECT model_id FROM %s ' \
' WHERE status=%%s ' \
' AND job_id=%%s ' \
' AND TIMESTAMPDIFF(SECOND, ' \
' _eng_last_update_time, ' \
' UTC_TIMESTAMP()) > %%s ' \
' LIMIT 1 ' \
% (self.modelsTableName,)
sqlParams = [self.STATUS_RUNNING, jobId, maxUpdateInterval]
numRows = conn.cursor.execute(query, sqlParams)
rows = conn.cursor.fetchall()
assert numRows <= 1, "Unexpected numRows: %r" % numRows
if numRows == 1:
(modelID,) = rows[0]
return modelID
@g_retrySQL
def adoptModelWithRetries(modelID):
adopted = False
with ConnectionFactory.get() as conn:
query = 'UPDATE %s SET _eng_worker_conn_id=%%s, ' \
' _eng_last_update_time=UTC_TIMESTAMP() ' \
' WHERE model_id=%%s ' \
' AND status=%%s' \
' AND TIMESTAMPDIFF(SECOND, ' \
' _eng_last_update_time, ' \
' UTC_TIMESTAMP()) > %%s ' \
' LIMIT 1 ' \
% (self.modelsTableName,)
sqlParams = [self._connectionID, modelID, self.STATUS_RUNNING,
maxUpdateInterval]
numRowsAffected = conn.cursor.execute(query, sqlParams)
assert numRowsAffected <= 1, 'Unexpected numRowsAffected=%r' % (
numRowsAffected,)
if numRowsAffected == 1:
adopted = True
else:
# Discern between transient failure during update and someone else
# claiming this model
(status, connectionID) = self._getOneMatchingRowNoRetries(
self._models, conn, {'model_id':modelID},
['status', '_eng_worker_conn_id'])
adopted = (status == self.STATUS_RUNNING and
connectionID == self._connectionID)
return adopted
adoptedModelID = None
while True:
modelID = findCandidateModelWithRetries()
if modelID is None:
break
if adoptModelWithRetries(modelID):
adoptedModelID = modelID
break
return adoptedModelID | 109,192 |
Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
comparedTo: The set of 40 cells we are being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs | def printOverlaps(comparedTo, coincs, seen):
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern', y[0]+1, ':', \
' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs), \
'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:', \
','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):', \
','.join(str(m+1) for m in closestCells)
return seen | 109,348 |
Compute prediction accuracy by checking if the next page in the sequence is
within the top N predictions calculated by the model
Args:
model: HTM model
size: Sample size
top: top N predictions to use
Returns: Probability the next page in the sequence is within the top N
predicted pages | def computeAccuracy(model, size, top):
accuracy = []
# Load MSNBC web data file
filename = os.path.join(os.path.dirname(__file__), "msnbc990928.zip")
with zipfile.ZipFile(filename) as archive:
with archive.open("msnbc990928.seq") as datafile:
# Skip header lines (first 7 lines)
for _ in xrange(7):
next(datafile)
# Skip learning data and compute accuracy using only new sessions
for _ in xrange(LEARNING_RECORDS):
next(datafile)
# Compute prediction accuracy by checking if the next page in the sequence
# is within the top N predictions calculated by the model
for _ in xrange(size):
pages = readUserSession(datafile)
model.resetSequenceStates()
for i in xrange(len(pages) - 1):
result = model.run({"page": pages[i]})
inferences = result.inferences["multiStepPredictions"][1]
# Get top N predictions for the next page
predicted = sorted(inferences.items(), key=itemgetter(1), reverse=True)[:top]
# Check if the next page is within the predicted pages
accuracy.append(1 if pages[i + 1] in zip(*predicted)[0] else 0)
return np.mean(accuracy) | 109,394 |
Reads the user session record from the file's cursor position
Args:
datafile: Data file whose cursor points at the beginning of the record
Returns:
list of pages in the order clicked by the user | def readUserSession(datafile):
for line in datafile:
pages = line.split()
total = len(pages)
# Select user sessions with 2 or more pages
if total < 2:
continue
# Exclude outliers by removing extreme long sessions
if total > 500:
continue
return [PAGE_CATEGORIES[int(i) - 1] for i in pages]
return [] | 109,395 |
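A usage sketch with an in-memory file; PAGE_CATEGORIES is assumed to be the category list defined elsewhere in this script:
import StringIO
datafile = StringIO.StringIO("1\n1 3 3 7\n")
pages = readUserSession(datafile)
# The 1-page session is skipped; the result maps the 1-based indices
# 1, 3, 3, 7 to their PAGE_CATEGORIES entries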
Returns the maximum delay for the InferenceElements in the inference
dictionary
Parameters:
-----------------------------------------------------------------------
inferences: A dictionary where the keys are InferenceElements | def getMaxDelay(inferences):
maxDelay = 0
for inferenceElement, inference in inferences.iteritems():
if isinstance(inference, dict):
for key in inference.iterkeys():
maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,
key),
maxDelay)
else:
maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),
maxDelay)
return maxDelay | 109,418 |
Parse the given XML file and return a dict describing the file.
Parameters:
----------------------------------------------------------------
filename: name of XML file to parse (no path)
path: path of the XML file. If None, then use the standard
configuration search path.
retval: returns a dict with each property as a key and a dict of all
the property's attributes as value | def _readConfigFile(cls, filename, path=None):
outputProperties = dict()
# Get the path to the config files.
if path is None:
filePath = cls.findConfigFile(filename)
else:
filePath = os.path.join(path, filename)
# ------------------------------------------------------------------
# Read in the config file
try:
if filePath is not None:
try:
# Log at debug level; the console log level is typically set to warning
_getLoggerBase().debug("Loading config file: %s", filePath)
with open(filePath, 'r') as inp:
contents = inp.read()
except Exception:
raise RuntimeError("Expected configuration file at %s" % filePath)
else:
# If the file was not found in the normal search paths, which includes
# checking the NTA_CONF_PATH, we'll try loading it from pkg_resources.
try:
contents = resource_string("nupic.support", filename)
except Exception as resourceException:
# These configuration files are optional; if one doesn't exist we'll
# just use an empty configuration string.
if filename in [USER_CONFIG, CUSTOM_CONFIG]:
contents = '<configuration/>'
else:
raise resourceException
elements = ElementTree.XML(contents)
if elements.tag != 'configuration':
raise RuntimeError("Expected top-level element to be 'configuration' "
"but got '%s'" % (elements.tag))
# ------------------------------------------------------------------
# Add in each property found
propertyElements = elements.findall('./property')
for propertyItem in propertyElements:
propInfo = dict()
# Parse this property element
propertyAttributes = list(propertyItem)
for propertyAttribute in propertyAttributes:
propInfo[propertyAttribute.tag] = propertyAttribute.text
# Get the name
name = propInfo.get('name', None)
# value is allowed to be empty string
if 'value' in propInfo and propInfo['value'] is None:
value = ''
else:
value = propInfo.get('value', None)
if value is None:
if 'novalue' in propInfo:
# Placeholder "novalue" properties are intended to be overridden
# via dynamic configuration or another configuration layer.
continue
else:
raise RuntimeError("Missing 'value' element within the property "
"element: => %s " % (str(propInfo)))
# The value is allowed to contain substitution tags of the form
# ${env.VARNAME}, which should be substituted with the corresponding
# environment variable values
restOfValue = value
value = ''
while True:
# Find the beginning of substitution tag
pos = restOfValue.find('${env.')
if pos == -1:
# No more environment variable substitutions
value += restOfValue
break
# Append prefix to value accumulator
value += restOfValue[0:pos]
# Find the end of current substitution tag
varTailPos = restOfValue.find('}', pos)
if varTailPos == -1:
raise RuntimeError(
"Trailing environment variable tag delimiter '}'"
" not found in %r" % (restOfValue))
# Extract environment variable name from tag
varname = restOfValue[pos + 6:varTailPos]
if varname not in os.environ:
raise RuntimeError("Attempting to use the value of the environment"
" variable %r, which is not defined" % (
varname))
envVarValue = os.environ[varname]
value += envVarValue
restOfValue = restOfValue[varTailPos + 1:]
# Check for errors
if name is None:
raise RuntimeError(
"Missing 'name' element within following property "
"element:\n => %s " % (str(propInfo)))
propInfo['value'] = value
outputProperties[name] = propInfo
return outputProperties
except Exception:
_getLoggerBase().exception("Error while parsing configuration file: %s.",
filePath)
raise | 109,423 |
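The ${env.VARNAME} substitution loop above can also be expressed with a regular expression; a standalone sketch (unlike the original, unterminated tags are left in place rather than raising):
import os
import re
def substituteEnvTags(value):
  def _lookup(match):
    varname = match.group(1)
    if varname not in os.environ:
      raise RuntimeError("Environment variable %r is not defined" % varname)
    return os.environ[varname]
  return re.sub(r'\$\{env\.([^}]+)\}', _lookup, value)
# substituteEnvTags('${env.HOME}/nta/logs') -> e.g. '/home/ec2-user/nta/logs'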
Set multiple custom properties and persist them to the custom
configuration store.
Parameters:
----------------------------------------------------------------
properties: a dict of property name/value pairs to set | def setCustomProperties(cls, properties):
_getLogger().info("Setting custom configuration properties=%r; caller=%r",
properties, traceback.format_stack())
_CustomConfigurationFileWrapper.edit(properties)
for propertyName, value in properties.iteritems():
cls.set(propertyName, value) | 109,424 |
If persistent is True, delete the custom configuration file on disk
Parameters:
----------------------------------------------------------------
persistent: if True, custom configuration file is deleted | def clear(cls, persistent=False):
if persistent:
try:
os.unlink(cls.getPath())
except OSError, e:
if e.errno != errno.ENOENT:
_getLogger().exception("Error %s while trying to remove dynamic " \
"configuration file: %s", e.errno,
cls.getPath())
raise
cls._path = None | 109,427 |
Edits the XML configuration file with the parameters specified by
properties
Parameters:
----------------------------------------------------------------
properties: dict of settings to be applied to the custom configuration store
(key is property name, value is value) | def edit(cls, properties):
copyOfProperties = copy(properties)
configFilePath = cls.getPath()
try:
with open(configFilePath, 'r') as fp:
contents = fp.read()
except IOError, e:
if e.errno != errno.ENOENT:
_getLogger().exception("Error %s reading custom configuration store "
"from %s, while editing properties %s.",
e.errno, configFilePath, properties)
raise
contents = '<configuration/>'
try:
elements = ElementTree.XML(contents)
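# Serialize the parsed tree right back (result discarded) as an extra
# sanity check that the configuration round-trips cleanly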
ElementTree.tostring(elements)
except Exception, e:
# Raising error as RuntimeError with custom message since ElementTree
# exceptions aren't clear.
msg = "File contents of custom configuration is corrupt. File " \
"location: %s; Contents: '%s'. Original Error (%s): %s." % \
(configFilePath, contents, type(e), e)
_getLogger().exception(msg)
raise RuntimeError(msg), None, sys.exc_info()[2]
if elements.tag != 'configuration':
e = "Expected top-level element to be 'configuration' but got '%s'" % \
(elements.tag)
_getLogger().error(e)
raise RuntimeError(e)
# Apply new properties to matching settings in the custom config store;
# pop matching properties from our copy of the properties dict
for propertyItem in elements.findall('./property'):
propInfo = dict((attr.tag, attr.text) for attr in propertyItem)
name = propInfo['name']
if name in copyOfProperties:
foundValues = propertyItem.findall('./value')
if len(foundValues) > 0:
foundValues[0].text = str(copyOfProperties.pop(name))
if not copyOfProperties:
break
else:
e = "Property %s missing value tag." % (name,)
_getLogger().error(e)
raise RuntimeError(e)
# Add unmatched remaining properties to custom config store
for propertyName, value in copyOfProperties.iteritems():
newProp = ElementTree.Element('property')
nameTag = ElementTree.Element('name')
nameTag.text = propertyName
newProp.append(nameTag)
valueTag = ElementTree.Element('value')
valueTag.text = str(value)
newProp.append(valueTag)
elements.append(newProp)
try:
makeDirectoryFromAbsolutePath(os.path.dirname(configFilePath))
with open(configFilePath, 'w') as fp:
fp.write(ElementTree.tostring(elements))
except Exception, e:
_getLogger().exception("Error while saving custom configuration "
"properties %s in %s.", properties,
configFilePath)
raise | 109,429 |
Copy specific variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
varNames: which variables to copy | def copyVarStatesFrom(self, particleState, varNames):
# Set this to false if you don't want the variable to move anymore
# after we set the state
allowedToMove = True
for varName in particleState['varStates']:
if varName in varNames:
# If this particle doesn't include this field, don't copy it
if varName not in self.permuteVars:
continue
# Set the best position to the copied position
state = copy.deepcopy(particleState['varStates'][varName])
state['_position'] = state['position']
state['bestPosition'] = state['position']
if not allowedToMove:
state['velocity'] = 0
# Set the state now
self.permuteVars[varName].setState(state)
if allowedToMove:
# Let the particle move in both directions from the best position
# it found previously and set its initial velocity to a known
# fraction of the total distance.
self.permuteVars[varName].resetVelocity(self._rng) | 109,436 |
Return the position of this particle. This returns a dict() of key
value pairs where each key is the name of the flattened permutation
variable and the value is its chosen value.
Parameters:
--------------------------------------------------------------
retval: dict() of flattened permutation choices | def getPosition(self):
result = dict()
for (varName, value) in self.permuteVars.iteritems():
result[varName] = value.getPosition()
return result | 109,437 |
Return the position of a particle given its state dict.
Parameters:
--------------------------------------------------------------
pState: dict produced by a particle's getState() method
retval: dict() of particle position, keys are the variable names,
values are their positions | def getPositionFromState(pState):
result = dict()
for (varName, value) in pState['varStates'].iteritems():
result[varName] = value['position']
return result | 109,438 |
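A sketch of the particle-state layout consumed above; only the 'position' entries matter here (assuming Particle is the enclosing class and the variable names are illustrative):
pState = {'varStates': {
    'varA': {'position': 21, 'bestPosition': 21, 'velocity': 0.5},
    'varB': {'position': 11, 'bestPosition': 9, 'velocity': -0.2}}}
print Particle.getPositionFromState(pState)  # {'varA': 21, 'varB': 11}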
Agitate this particle so that it is likely to go to a new position.
Every time agitate is called, the particle is jiggled an even greater
amount.
Parameters:
--------------------------------------------------------------
retval: None | def agitate(self):
for (varName, var) in self.permuteVars.iteritems():
var.agitate()
self.newPosition() | 109,439 |
Choose a new position based on results obtained so far from all other
particles.
Parameters:
--------------------------------------------------------------
whichVars: If not None, only move these variables
retval: new position | def newPosition(self, whichVars=None):
# TODO: incorporate data from choice variables....
# TODO: make sure we're calling this when appropriate.
# Get the global best position for this swarm generation
globalBestPosition = None
# If speculative particles are enabled, use the global best considering
# even particles in the current generation. This gives better results
# but does not provide repeatable results because it depends on
# worker timing
if self._hsObj._speculativeParticles:
genIdx = self.genIdx
else:
genIdx = self.genIdx - 1
if genIdx >= 0:
(bestModelId, _) = self._resultsDB.bestModelIdAndErrScore(self.swarmId,
genIdx)
if bestModelId is not None:
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(
bestModelId)
globalBestPosition = Particle.getPositionFromState(particleState)
# Update each variable
for (varName, var) in self.permuteVars.iteritems():
if whichVars is not None and varName not in whichVars:
continue
if globalBestPosition is None:
var.newPosition(None, self._rng)
else:
var.newPosition(globalBestPosition[varName], self._rng)
# get the new position
position = self.getPosition()
# Log the new position
if self.logger.getEffectiveLevel() <= logging.DEBUG:
msg = StringIO.StringIO()
print >> msg, "New particle position: \n%s" % (pprint.pformat(position,
indent=4))
print >> msg, "Particle variables:"
for (varName, var) in self.permuteVars.iteritems():
print >> msg, " %s: %s" % (varName, str(var))
self.logger.debug(msg.getvalue())
msg.close()
return position | 109,440 |
Given the timestamp of a record (a datetime object), compute the record's
timestamp index - this is the timestamp divided by the aggregation period.
Parameters:
------------------------------------------------------------------------
recordTS: datetime instance
retval: record timestamp index, or None if no aggregation period | def _computeTimestampRecordIdx(self, recordTS):
if self._aggregationPeriod is None:
return None
# Base record index on number of elapsed months if aggregation is in
# months
if self._aggregationPeriod['months'] > 0:
assert self._aggregationPeriod['seconds'] == 0
result = int(
(recordTS.year * 12 + (recordTS.month-1)) /
self._aggregationPeriod['months'])
# Base record index on elapsed seconds
elif self._aggregationPeriod['seconds'] > 0:
delta = recordTS - datetime.datetime(year=1, month=1, day=1)
deltaSecs = delta.days * 24 * 60 * 60 \
+ delta.seconds \
+ delta.microseconds / 1000000.0
result = int(deltaSecs / self._aggregationPeriod['seconds'])
else:
result = None
return result | 109,578 |
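Worked examples of the bucket arithmetic above, mirroring both branches:
import datetime
# Months-based period of 3 months: June 2010 maps to bucket
print int((2010 * 12 + (6 - 1)) / 3)  # 8041
# Seconds-based period of 900s (15 min): timestamps within the same
# quarter-hour land in the same bucket
ts = datetime.datetime(2010, 6, 1, 10, 44, 0)
delta = ts - datetime.datetime(year=1, month=1, day=1)
deltaSecs = delta.days * 24 * 60 * 60 + delta.seconds
print int(deltaSecs / 900)  # same value for any ts in [10:30:00, 10:45:00)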
Given the name of an aggregation function, returns the function pointer
and param.
Parameters:
------------------------------------------------------------------------
funcName: a string (name of function) or funcPtr
retval: (funcPtr, param) | def _getFuncPtrAndParams(self, funcName):
params = None
if isinstance(funcName, basestring):
if funcName == 'sum':
fp = _aggr_sum
elif funcName == 'first':
fp = _aggr_first
elif funcName == 'last':
fp = _aggr_last
elif funcName == 'mean':
fp = _aggr_mean
elif funcName == 'max':
fp = max
elif funcName == 'min':
fp = min
elif funcName == 'mode':
fp = _aggr_mode
elif funcName.startswith('wmean:'):
fp = _aggr_weighted_mean
paramsName = funcName[6:]
params = [f[0] for f in self._inputFields].index(paramsName)
else:
# Fail fast on unrecognized names rather than hitting a NameError at return
raise ValueError("Unknown aggregation function name: %r" % (funcName,))
else:
fp = funcName
return (fp, params) | 109,603 |
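The string dispatch above amounts to a lookup table; a simplified standalone sketch (the 'wmean:' parameter handling and the _aggr_* helpers are omitted, with builtins standing in):
_AGGR_FUNCS = {
    'sum': sum, 'max': max, 'min': min,
    'first': lambda values: values[0],
    'last': lambda values: values[-1],
}
def getFuncPtr(funcName):
  if not isinstance(funcName, basestring):
    return funcName              # caller passed a function pointer directly
  return _AGGR_FUNCS[funcName]   # raises KeyError for unknown names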
Generate the aggregated output record
Parameters:
------------------------------------------------------------------------
retval: outputRecord | def _createAggregateRecord(self):
record = []
for i, (fieldIdx, aggFP, paramIdx) in enumerate(self._fields):
if aggFP is None: # this field is not supposed to be aggregated.
continue
values = self._slice[i]
if paramIdx is not None:
record.append(aggFP(values, self._slice[paramIdx]))
else:
record.append(aggFP(values))
return record | 109,604 |
Parse command line options
Args:
args: command line arguments (not including sys.argv[0])
Returns:
namedtuple ParseCommandLineOptionsResult | def _parseCommandLineOptions(args):
usageStr = (
"%prog [options] descriptionPyDirectory\n"
"This script runs a single OPF Model described by description.py "
"located in the given directory."
)
parser = optparse.OptionParser(usage=usageStr)
parser.add_option("-c",
help="Create a model and save it under the given "
"<CHECKPOINT> name, but don't run it",
dest="createCheckpointName",
action="store", type="string", default="",
metavar="<CHECKPOINT>")
parser.add_option("--listCheckpoints",
help="List all available checkpoints",
dest="listAvailableCheckpoints",
action="store_true", default=False)
parser.add_option("--listTasks",
help="List all task labels in description.py",
dest="listTasks",
action="store_true", default=False)
parser.add_option("--load",
help="Load a model from the given <CHECKPOINT> and run it. "
"Run with --listCheckpoints flag for more details. ",
dest="runCheckpointName",
action="store", type="string", default="",
metavar="<CHECKPOINT>")
parser.add_option("--newSerialization",
help="Use new capnproto serialization",
dest="newSerialization",
action="store_true", default=False)
#parser.add_option("--reuseDatasets",
# help="Keep existing generated/aggregated datasets",
# dest="reuseDatasets", action="store_true",
# default=False)
parser.add_option("--tasks",
help="Run the tasks with the given TASK LABELS "
"in the order they are given. Either end of "
"arg-list, or a standalone dot ('.') arg or "
"the next short or long option name (-a or "
"--blah) terminates the list. NOTE: FAILS "
"TO RECOGNIZE task label names with one or more "
"leading dashes. [default: run all of the tasks in "
"description.py]",
dest="taskLabels", default=[],
action="callback", callback=reapVarArgsCallback,
metavar="TASK_LABELS")
parser.add_option("--testMode",
help="Reduce iteration count for testing",
dest="testMode", action="store_true",
default=False)
parser.add_option("--noCheckpoint",
help="Don't checkpoint the model after running each task.",
dest="checkpointModel", action="store_false",
default=True)
options, experiments = parser.parse_args(args)
# Validate args
mutuallyExclusiveOptionCount = sum([bool(options.createCheckpointName),
options.listAvailableCheckpoints,
options.listTasks,
bool(options.runCheckpointName)])
if mutuallyExclusiveOptionCount > 1:
_reportCommandLineUsageErrorAndExit(
parser,
"Options: -c, --listCheckpoints, --listTasks, and --load are "
"mutually exclusive. Please select only one")
mutuallyExclusiveOptionCount = sum([bool(not options.checkpointModel),
bool(options.createCheckpointName)])
if mutuallyExclusiveOptionCount > 1:
_reportCommandLineUsageErrorAndExit(
parser,
"Options: -c and --noCheckpoint are "
"mutually exclusive. Please select only one")
if len(experiments) != 1:
_reportCommandLineUsageErrorAndExit(
parser,
"Exactly ONE experiment must be specified, but got %s (%s)" % (
len(experiments), experiments))
# Done with parser
parser.destroy()
# Prepare results
# Directory path of the experiment (that contain description.py)
experimentDir = os.path.abspath(experiments[0])
# RunExperiment.py's private options (g_parsedPrivateCommandLineOptionsSchema)
privateOptions = dict()
privateOptions['createCheckpointName'] = options.createCheckpointName
privateOptions['listAvailableCheckpoints'] = options.listAvailableCheckpoints
privateOptions['listTasks'] = options.listTasks
privateOptions['runCheckpointName'] = options.runCheckpointName
privateOptions['newSerialization'] = options.newSerialization
privateOptions['testMode'] = options.testMode
#privateOptions['reuseDatasets'] = options.reuseDatasets
privateOptions['taskLabels'] = options.taskLabels
privateOptions['checkpointModel'] = options.checkpointModel
result = ParseCommandLineOptionsResult(experimentDir=experimentDir,
privateOptions=privateOptions)
return result | 109,629 |
Creates and runs the experiment
Args:
options: namedtuple ParseCommandLineOptionsResult
model: For testing: may pass in an existing OPF Model instance
to use instead of creating a new one.
Returns: reference to OPFExperiment instance that was constructed (this
is provided to aid with debugging) or None, if none was
created. | def _runExperimentImpl(options, model=None):
json_helpers.validate(options.privateOptions,
schemaDict=g_parsedPrivateCommandLineOptionsSchema)
# Load the experiment's description.py module
experimentDir = options.experimentDir
descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
experimentDir)
expIface = helpers.getExperimentDescriptionInterfaceFromModule(
descriptionPyModule)
# Handle "list checkpoints" request
if options.privateOptions['listAvailableCheckpoints']:
_printAvailableCheckpoints(experimentDir)
return None
# Load experiment tasks
experimentTasks = expIface.getModelControl().get('tasks', [])
# If the tasks list is empty, and this is a nupic environment description
# file being run from the OPF, convert it to a simple OPF description file.
if (len(experimentTasks) == 0 and
expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
expIface.convertNupicEnvToOPF()
experimentTasks = expIface.getModelControl().get('tasks', [])
# Ensures all the source locations are either absolute paths or relative to
# the nupic.datafiles package_data location.
expIface.normalizeStreamSources()
# Extract option
newSerialization = options.privateOptions['newSerialization']
# Handle listTasks
if options.privateOptions['listTasks']:
print "Available tasks:"
for label in [t['taskLabel'] for t in experimentTasks]:
print "\t", label
return None
# Construct the experiment instance
if options.privateOptions['runCheckpointName']:
assert model is None
checkpointName = options.privateOptions['runCheckpointName']
model = ModelFactory.loadFromCheckpoint(
savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName),
newSerialization=newSerialization)
elif model is not None:
print "Skipping creation of OPFExperiment instance: caller provided his own"
else:
modelDescription = expIface.getModelDescription()
model = ModelFactory.create(modelDescription)
# Handle "create model" request
if options.privateOptions['createCheckpointName']:
checkpointName = options.privateOptions['createCheckpointName']
_saveModel(model=model,
experimentDir=experimentDir,
checkpointLabel=checkpointName,
newSerialization=newSerialization)
return model
# Build the task list
# Default task execution index list is in the natural list order of the tasks
taskIndexList = range(len(experimentTasks))
customTaskExecutionLabelsList = options.privateOptions['taskLabels']
if customTaskExecutionLabelsList:
taskLabelsList = [t['taskLabel'] for t in experimentTasks]
taskLabelsSet = set(taskLabelsList)
customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)
assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
("Some custom-provided task execution labels don't correspond "
"to actual task labels: mismatched labels: %r; actual task "
"labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
taskLabelsList)
taskIndexList = [taskLabelsList.index(label) for label in
customTaskExecutionLabelsList]
print "#### Executing custom task list: %r" % [taskLabelsList[i] for
i in taskIndexList]
# Run all experiment tasks
for taskIndex in taskIndexList:
task = experimentTasks[taskIndex]
# Create a task runner and run it!
taskRunner = _TaskRunner(model=model,
task=task,
cmdOptions=options)
taskRunner.run()
del taskRunner
if options.privateOptions['checkpointModel']:
_saveModel(model=model,
experimentDir=experimentDir,
checkpointLabel=task['taskLabel'],
newSerialization=newSerialization)
return model | 109,632 |
Constructor
Args:
model: The OPF Model instance against which to run the task
task: A dictionary conforming to opfTaskSchema.json
cmdOptions: ParseCommandLineOptionsResult namedtuple | def __init__(self, model, task, cmdOptions):
validateOpfJsonValue(task, "opfTaskSchema.json")
# Set up our logger
self.__logger = logging.getLogger(".".join(
['com.numenta', self.__class__.__module__, self.__class__.__name__]))
#self.__logger.setLevel(logging.DEBUG)
self.__logger.debug(("Instantiated %s(" + \
"model=%r, " + \
"task=%r, " + \
"cmdOptions=%r)") % \
(self.__class__.__name__,
model,
task,
cmdOptions))
# Generate a new dataset from streamDef and create the dataset reader
streamDef = task['dataset']
datasetReader = opf_basic_environment.BasicDatasetReader(streamDef)
self.__model = model
self.__datasetReader = datasetReader
self.__task = task
self.__cmdOptions = cmdOptions
self.__predictionLogger = opf_basic_environment.BasicPredictionLogger(
fields=model.getFieldInfo(),
experimentDir=cmdOptions.experimentDir,
label=task['taskLabel'],
inferenceType=self.__model.getInferenceType())
taskControl = task['taskControl']
# Create Task Driver
self.__taskDriver = OPFTaskDriver(
taskControl=taskControl,
model=model)
loggedMetricPatterns = taskControl.get('loggedMetrics', None)
loggedMetricLabels = matchPatterns(loggedMetricPatterns,
self.__taskDriver.getMetricLabels())
self.__predictionLogger.setLoggedMetrics(loggedMetricLabels)
# Create a prediction metrics logger
self.__metricsLogger = opf_basic_environment.BasicPredictionMetricsLogger(
experimentDir=cmdOptions.experimentDir,
label=task['taskLabel']) | 109,639 |
The main function of the HypersearchWorker script. This parses the command
line arguments, instantiates a HypersearchWorker instance, and then
runs it.
Parameters:
----------------------------------------------------------------------
retval: jobID of the job we ran. This is used by unit test code
when calling this worker using the --params command
line option (which tells this worker to insert the job
itself). | def main(argv):
parser = OptionParser(helpString)
parser.add_option("--jobID", action="store", type="int", default=None,
help="jobID of the job within the dbTable [default: %default].")
parser.add_option("--modelID", action="store", type="str", default=None,
help=("Tell worker to re-run this model ID. When specified, jobID "
"must also be specified [default: %default]."))
parser.add_option("--workerID", action="store", type="str", default=None,
help=("workerID of the scheduler's SlotAgent (GenericWorker) that "
"hosts this SpecializedWorker [default: %default]."))
parser.add_option("--params", action="store", default=None,
help="Create and execute a new hypersearch request using this JSON " \
"format params string. This is helpful for unit tests and debugging. " \
"When specified jobID must NOT be specified. [default: %default].")
parser.add_option("--clearModels", action="store_true", default=False,
help="clear out the models table before starting [default: %default].")
parser.add_option("--resetJobStatus", action="store_true", default=False,
help="Reset the job status before starting [default: %default].")
parser.add_option("--logLevel", action="store", type="int", default=None,
help="override default log level. Pass in an integer value that "
"represents the desired logging level (10=logging.DEBUG, "
"20=logging.INFO, etc.) [default: %default].")
# Evaluate command line arguments
(options, args) = parser.parse_args(argv[1:])
if len(args) != 0:
raise RuntimeError("Expected no command line arguments, but got: %s" % \
(args))
if (options.jobID and options.params):
raise RuntimeError("--jobID and --params can not be used at the same time")
if (options.jobID is None and options.params is None):
raise RuntimeError("Either --jobID or --params must be specified.")
initLogging(verbose=True)
# Instantiate the HypersearchWorker and run it
hst = HypersearchWorker(options, argv[1:])
# Normal use. This is one among a number of workers. If we encounter
# an exception at the outer loop here, we fail the entire job.
if options.params is None:
try:
jobID = hst.run()
except Exception, e:
jobID = options.jobID
msg = StringIO.StringIO()
print >>msg, "%s: Exception occurred in Hypersearch Worker: %r" % \
(ErrorCodes.hypersearchLogicErr, e)
traceback.print_exc(None, msg)
completionReason = ClientJobsDAO.CMPL_REASON_ERROR
completionMsg = msg.getvalue()
hst.logger.error(completionMsg)
# If no other worker already marked the job as failed, do so now.
jobsDAO = ClientJobsDAO.get()
workerCmpReason = jobsDAO.jobGetFields(options.jobID,
['workerCompletionReason'])[0]
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
jobsDAO.jobSetFields(options.jobID, fields=dict(
cancel=True,
workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
workerCompletionMsg = completionMsg),
useConnectionID=False,
ignoreUnchanged=True)
  # Run just 1 worker for the entire job. Used for unit tests that run in
  # a single process
else:
jobID = None
completionReason = ClientJobsDAO.CMPL_REASON_SUCCESS
completionMsg = "Success"
try:
jobID = hst.run()
except Exception, e:
jobID = hst._options.jobID
completionReason = ClientJobsDAO.CMPL_REASON_ERROR
completionMsg = "ERROR: %s" % (e,)
raise
finally:
if jobID is not None:
cjDAO = ClientJobsDAO.get()
cjDAO.jobSetCompleted(jobID=jobID,
completionReason=completionReason,
completionMsg=completionMsg)
return jobID | 109,738 |
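# Hypothetical invocations of this entry point (the script name and option
# values below are illustrative assumptions, not taken from the source):
#
#   python hypersearch_worker.py --jobID=1234 --logLevel=20
#   python hypersearch_worker.py --params='{"hsVersion": "v2", ...}'
#
# The first form attaches to an existing job; the second inserts a new job
# from a JSON params string, as unit tests do.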
Instantiate the Hypersearch worker
Parameters:
---------------------------------------------------------------------
options: The command line options. See the main() method for a
description of these options
cmdLineArgs: Copy of the command line arguments, so we can place them
in the log | def __init__(self, options, cmdLineArgs):
# Save options
self._options = options
# Instantiate our logger
self.logger = logging.getLogger(".".join(
['com.numenta.nupic.swarming', self.__class__.__name__]))
# Override log level?
if options.logLevel is not None:
self.logger.setLevel(options.logLevel)
self.logger.info("Launched with command line arguments: %s" %
str(cmdLineArgs))
self.logger.debug("Env variables: %s" % (pprint.pformat(os.environ)))
#self.logger.debug("Value of nupic.hypersearch.modelOrphanIntervalSecs: %s" \
# % Configuration.get('nupic.hypersearch.modelOrphanIntervalSecs'))
# Init random seed
random.seed(42)
# This will hold an instance of a Hypersearch class which handles
# the logic of which models to create/evaluate.
self._hs = None
# -------------------------------------------------------------------------
# These elements form a cache of the update counters we last received for
    # all models in the database. It is used to determine which models'
    # results have changed so that we can notify the Hypersearch object.
# This is a dict of modelID -> updateCounter
self._modelIDCtrDict = dict()
    # This is the above, as a list of tuples: (modelID, updateCounter)
self._modelIDCtrList = []
# This is just the set of modelIDs (keys)
self._modelIDSet = set()
# This will be filled in by run()
self._workerID = None | 109,739 |
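# A minimal sketch (illustrative only) of how the update-counter cache above
# can be diffed against a fresh poll to find models whose results changed:
def _changedModelIDsSketch(cachedCtrs, latestCtrs):
  # Both arguments map modelID -> updateCounter. A model counts as changed
  # if it is new or its counter moved since the last poll.
  return [mID for mID, ctr in latestCtrs.items()
          if cachedCtrs.get(mID) != ctr]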
Run this worker.
Parameters:
----------------------------------------------------------------------
retval: jobID of the job we ran. This is used by unit test code
when calling this worker using the --params command
line option (which tells this worker to insert the job
itself). | def run(self):
# Easier access to options
options = self._options
# ---------------------------------------------------------------------
# Connect to the jobs database
self.logger.info("Connecting to the jobs database")
cjDAO = ClientJobsDAO.get()
# Get our worker ID
self._workerID = cjDAO.getConnectionID()
if options.clearModels:
cjDAO.modelsClearAll()
# -------------------------------------------------------------------------
# if params were specified on the command line, insert a new job using
# them.
if options.params is not None:
options.jobID = cjDAO.jobInsert(client='hwTest', cmdLine="echo 'test mode'",
params=options.params, alreadyRunning=True,
minimumWorkers=1, maximumWorkers=1,
jobType = cjDAO.JOB_TYPE_HS)
if options.workerID is not None:
wID = options.workerID
else:
wID = self._workerID
buildID = Configuration.get('nupic.software.buildNumber', 'N/A')
logPrefix = '<BUILDID=%s, WORKER=HW, WRKID=%s, JOBID=%s> ' % \
(buildID, wID, options.jobID)
ExtendedLogger.setLogPrefix(logPrefix)
# ---------------------------------------------------------------------
# Get the search parameters
# If asked to reset the job status, do that now
if options.resetJobStatus:
cjDAO.jobSetFields(options.jobID,
fields={'workerCompletionReason': ClientJobsDAO.CMPL_REASON_SUCCESS,
'cancel': False,
#'engWorkerState': None
},
useConnectionID=False,
ignoreUnchanged=True)
jobInfo = cjDAO.jobInfo(options.jobID)
self.logger.info("Job info retrieved: %s" % (str(clippedObj(jobInfo))))
# ---------------------------------------------------------------------
# Instantiate the Hypersearch object, which will handle the logic of
# which models to create when we need more to evaluate.
jobParams = json.loads(jobInfo.params)
# Validate job params
jsonSchemaPath = os.path.join(os.path.dirname(__file__),
"jsonschema",
"jobParamsSchema.json")
validate(jobParams, schemaPath=jsonSchemaPath)
hsVersion = jobParams.get('hsVersion', None)
if hsVersion == 'v2':
self._hs = HypersearchV2(searchParams=jobParams, workerID=self._workerID,
cjDAO=cjDAO, jobID=options.jobID, logLevel=options.logLevel)
else:
raise RuntimeError("Invalid Hypersearch implementation (%s) specified" \
% (hsVersion))
# =====================================================================
# The main loop.
try:
exit = False
numModelsTotal = 0
print >>sys.stderr, "reporter:status:Evaluating first model..."
while not exit:
# ------------------------------------------------------------------
# Choose a model to evaluate
batchSize = 10 # How many to try at a time.
modelIDToRun = None
while modelIDToRun is None:
if options.modelID is None:
# -----------------------------------------------------------------
# Get the latest results on all running models and send them to
# the Hypersearch implementation
# This calls cjDAO.modelsGetUpdateCounters(), compares the
# updateCounters with what we have cached, fetches the results for the
# changed and new models, and sends those to the Hypersearch
# implementation's self._hs.recordModelProgress() method.
self._processUpdatedModels(cjDAO)
# --------------------------------------------------------------------
# Create a new batch of models
(exit, newModels) = self._hs.createModels(numModels = batchSize)
if exit:
break
# No more models left to create, just loop. The _hs is waiting for
# all remaining running models to complete, and may pick up on an
# orphan if it detects one.
if len(newModels) == 0:
continue
# Try and insert one that we will run
for (modelParams, modelParamsHash, particleHash) in newModels:
jsonModelParams = json.dumps(modelParams)
(modelID, ours) = cjDAO.modelInsertAndStart(options.jobID,
jsonModelParams, modelParamsHash, particleHash)
              # Some other worker is already running it; tell the Hypersearch
              # object so that it doesn't try to insert it again
if not ours:
mParamsAndHash = cjDAO.modelsGetParams([modelID])[0]
mResult = cjDAO.modelsGetResultAndStatus([modelID])[0]
results = mResult.results
if results is not None:
results = json.loads(results)
modelParams = json.loads(mParamsAndHash.params)
particleHash = cjDAO.modelsGetFields(modelID,
['engParticleHash'])[0]
particleInst = "%s.%s" % (
modelParams['particleState']['id'],
modelParams['particleState']['genIdx'])
self.logger.info("Adding model %d to our internal DB " \
"because modelInsertAndStart() failed to insert it: " \
"paramsHash=%s, particleHash=%s, particleId='%s'", modelID,
mParamsAndHash.engParamsHash.encode('hex'),
particleHash.encode('hex'), particleInst)
self._hs.recordModelProgress(modelID = modelID,
modelParams = modelParams,
modelParamsHash = mParamsAndHash.engParamsHash,
results = results,
completed = (mResult.status == cjDAO.STATUS_COMPLETED),
completionReason = mResult.completionReason,
matured = mResult.engMatured,
numRecords = mResult.numRecords)
else:
modelIDToRun = modelID
break
else:
# A specific modelID was passed on the command line
modelIDToRun = int(options.modelID)
mParamsAndHash = cjDAO.modelsGetParams([modelIDToRun])[0]
modelParams = json.loads(mParamsAndHash.params)
modelParamsHash = mParamsAndHash.engParamsHash
# Make us the worker
cjDAO.modelSetFields(modelIDToRun,
dict(engWorkerConnId=self._workerID))
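            # NOTE: the following block is intentionally disabled (if False).
            # It preserves, for reference, a scheme for recycling an orphaned
            # model's hashes so that a fresh model with identical params can
            # be re-inserted via modelInsertAndStart() below.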
if False:
# Change the hash and params of the old entry so that we can
# create a new model with the same params
for attempt in range(1000):
paramsHash = hashlib.md5("OrphanParams.%d.%d" % (modelIDToRun,
attempt)).digest()
particleHash = hashlib.md5("OrphanParticle.%d.%d" % (modelIDToRun,
attempt)).digest()
try:
cjDAO.modelSetFields(modelIDToRun,
dict(engParamsHash=paramsHash,
engParticleHash=particleHash))
success = True
                except Exception:
                  success = False
if success:
break
if not success:
raise RuntimeError("Unexpected failure to change paramsHash and "
"particleHash of orphaned model")
(modelIDToRun, ours) = cjDAO.modelInsertAndStart(options.jobID,
mParamsAndHash.params, modelParamsHash)
# ^^^ end while modelIDToRun ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ---------------------------------------------------------------
# We have a model, evaluate it now
# All done?
if exit:
break
# Run the model now
self.logger.info("RUNNING MODEL GID=%d, paramsHash=%s, params=%s",
modelIDToRun, modelParamsHash.encode('hex'), modelParams)
# ---------------------------------------------------------------------
# Construct model checkpoint GUID for this model:
# jobParams['persistentJobGUID'] contains the client's (e.g., API Server)
# persistent, globally-unique model identifier, which is what we need;
persistentJobGUID = jobParams['persistentJobGUID']
assert persistentJobGUID, "persistentJobGUID: %r" % (persistentJobGUID,)
modelCheckpointGUID = jobInfo.client + "_" + persistentJobGUID + (
'_' + str(modelIDToRun))
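        # Resulting GUID format: "<client>_<persistentJobGUID>_<modelID>"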
self._hs.runModel(modelID=modelIDToRun, jobID = options.jobID,
modelParams=modelParams, modelParamsHash=modelParamsHash,
jobsDAO=cjDAO, modelCheckpointGUID=modelCheckpointGUID)
# TODO: don't increment for orphaned models
numModelsTotal += 1
self.logger.info("COMPLETED MODEL GID=%d; EVALUATED %d MODELs",
modelIDToRun, numModelsTotal)
print >>sys.stderr, "reporter:status:Evaluated %d models..." % \
(numModelsTotal)
print >>sys.stderr, "reporter:counter:HypersearchWorker,numModels,1"
if options.modelID is not None:
exit = True
# ^^^ end while not exit
finally:
# Provide Hypersearch instance an opportunity to clean up temporary files
self._hs.close()
self.logger.info("FINISHED. Evaluated %d models." % (numModelsTotal))
print >>sys.stderr, "reporter:status:Finished, evaluated %d models" % (numModelsTotal)
return options.jobID | 109,741 |
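# The batch-insert loop above relies on modelInsertAndStart() being atomic:
# when two workers generate models with identical params, exactly one gets
# ours=True. A minimal sketch of that contract (illustrative only, not the
# ClientJobsDAO implementation):
def _insertOrAdoptSketch(paramsHashToID, paramsHash, newModelID):
  # paramsHashToID stands in for the models table's unique hash index.
  # Returns (modelID, ours); ours is False when another worker won the race.
  existingID = paramsHashToID.get(paramsHash)
  if existingID is not None:
    return (existingID, False)
  paramsHashToID[paramsHash] = newModelID
  return (newModelID, True)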
Adds a value to a column over a range of rows.
Args:
reader: A FileRecordStream object with input data.
writer: A FileRecordStream object to write output data to.
column: The column of data to modify.
start: The first row in the range to modify.
stop: The last row in the range to modify.
value: The value to add. | def add(reader, writer, column, start, stop, value):
for i, row in enumerate(reader):
if i >= start and i <= stop:
row[column] = type(value)(row[column]) + value
writer.appendRecord(row) | 109,798 |
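# A hypothetical usage sketch for add() (the file names are assumptions;
# FileRecordStream is assumed to come from nupic.data.file_record_stream):
#
#   from nupic.data.file_record_stream import FileRecordStream
#
#   reader = FileRecordStream("input.csv")
#   writer = FileRecordStream("output.csv", write=True,
#                             fields=reader.getFields())
#   add(reader, writer, column=1, start=0, stop=99, value=10)
#   writer.close()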
Multiplies a column by a value over a range of rows.
Args:
reader: A FileRecordStream object with input data.
writer: A FileRecordStream object to write output data to.
column: The column of data to modify.
start: The first row in the range to modify.
stop: The last row in the range to modify.
multiple: The value to scale/multiply by. | def scale(reader, writer, column, start, stop, multiple):
for i, row in enumerate(reader):
if i >= start and i <= stop:
row[column] = type(multiple)(row[column]) * multiple
writer.appendRecord(row) | 109,799 |
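# scale() mirrors add(); a hypothetical call against the same assumed
# streams:
#
#   scale(reader, writer, column=1, start=0, stop=99, multiple=2.0)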