| id (int64, 22 to 34.9k) | comment_id (int64, 0 to 328) | comment (string, lengths 2 to 2.55k) | code (string, lengths 31 to 107k) | classification (6 classes) | isFinished (bool, 1 class) | code_context_2 (string, lengths 21 to 27.3k) | code_context_10 (string, lengths 29 to 27.3k) | code_context_20 (string, lengths 29 to 27.3k) |
|---|---|---|---|---|---|---|---|---|
16,767 | 1 | // TODO(hbs): allow setting of writeBufferSize | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | IMPLEMENTATION | true | TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) { | public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset()); | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue; |
16,767 | 2 | //
// Since the cal to 'next' may block, we need to first
// check that there is a message available
// | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | NONSATD | true | byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) { | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset()); | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue; |
16,767 | 3 | // Skip data whose MAC was not verified successfully | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | NONSATD | true | data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1); | boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) { | // Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE: |
16,767 | 4 | // Unwrap data if need be | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | NONSATD | true | continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data); | Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage | while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break; |
16,767 | 5 | // Skip data that was not unwrapped successfuly | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | NONSATD | true | data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1); | }
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) { | //
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default: |
16,767 | 6 | //
// Extract KafkaDataMessage
// | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | NONSATD | true | byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try { | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next(); | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1); |
16,767 | 7 | // Sleep a tiny while | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | NONSATD | true | }
} else {
// Sleep a tiny while
try {
Thread.sleep(1L); | encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method | }
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} |
16,767 | 8 | // Set abort to true in case we exit the 'run' method | @Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
return new Runnable() {
@Override
public void run() {
ConsumerIterator<byte[],byte[]> iter = stream.iterator();
byte[] sipHashKey = frontend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_FRONTEND_IN);
byte[] aesKey = frontend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_FRONTEND_IN);
// Iterate on the messages
TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
KafkaOffsetCounters counters = pool.getCounters();
// TODO(hbs): allow setting of writeBufferSize
try {
while (iter.hasNext()) {
//
// Since the cal to 'next' may block, we need to first
// check that there is a message available
//
boolean nonEmpty = iter.nonEmpty();
if (nonEmpty) {
MessageAndMetadata<byte[], byte[]> msg = iter.next();
counters.count(msg.partition(), msg.offset());
byte[] data = msg.message();
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_MESSAGES, Sensision.EMPTY_LABELS, 1);
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_BYTES, Sensision.EMPTY_LABELS, data.length);
if (null != sipHashKey) {
data = CryptoUtils.removeMAC(sipHashKey, data);
}
// Skip data whose MAC was not verified successfully
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
continue;
}
// Unwrap data if need be
if (null != aesKey) {
data = CryptoUtils.unwrap(aesKey, data);
}
// Skip data that was not unwrapped successfuly
if (null == data) {
Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_FRONTEND_KAFKA_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
continue;
}
//
// Extract KafkaDataMessage
//
KafkaDataMessage tmsg = new KafkaDataMessage();
deserializer.deserialize(tmsg, data);
switch(tmsg.getType()) {
case STORE:
GTSEncoder encoder = new GTSEncoder(0L, null, tmsg.getData());
encoder.setClassId(tmsg.getClassId());
encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | NONSATD | true | t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
} | // Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} | encoder.setLabelsId(tmsg.getLabelsId());
frontend.dispatch(encoder);
break;
case DELETE:
case ARCHIVE:
break;
default:
throw new RuntimeException("Invalid message type.");
}
} else {
// Sleep a tiny while
try {
Thread.sleep(1L);
} catch (InterruptedException ie) {
}
}
}
} catch (Throwable t) {
t.printStackTrace(System.err);
} finally {
// Set abort to true in case we exit the 'run' method
pool.getAbort().set(true);
}
}
};
} |
380 | 0 | // TODO: More checking here - actually parse out tile coord | @Override
public boolean accept(File pathname)
{
// TODO: More checking here - actually parse out tile coord
return pathname.isFile() && pathname.getName().endsWith(".tile");
} | IMPLEMENTATION | true | public boolean accept(File pathname)
{
// TODO: More checking here - actually parse out tile coord
return pathname.isFile() && pathname.getName().endsWith(".tile");
} | @Override
public boolean accept(File pathname)
{
// TODO: More checking here - actually parse out tile coord
return pathname.isFile() && pathname.getName().endsWith(".tile");
} | @Override
public boolean accept(File pathname)
{
// TODO: More checking here - actually parse out tile coord
return pathname.isFile() && pathname.getName().endsWith(".tile");
} |
386 | 0 | /** Constructs storage for {@code double} valued instruments. */ | public static <T> MetricStorage doubleAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableDoubleMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
if (Aggregator.empty() == aggregator) {
return empty();
}
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableDoubleMeasurement result =
new ObservableDoubleMeasurement() {
@Override
public void observe(double value, Attributes attributes) {
T accumulation =
aggregator.accumulateDoubleMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(double value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} | NONSATD | true | public static <T> MetricStorage doubleAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableDoubleMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
if (Aggregator.empty() == aggregator) {
return empty();
}
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableDoubleMeasurement result =
new ObservableDoubleMeasurement() {
@Override
public void observe(double value, Attributes attributes) {
T accumulation =
aggregator.accumulateDoubleMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(double value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} | public static <T> MetricStorage doubleAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableDoubleMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
if (Aggregator.empty() == aggregator) {
return empty();
}
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableDoubleMeasurement result =
new ObservableDoubleMeasurement() {
@Override
public void observe(double value, Attributes attributes) {
T accumulation =
aggregator.accumulateDoubleMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(double value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} | public static <T> MetricStorage doubleAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableDoubleMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
if (Aggregator.empty() == aggregator) {
return empty();
}
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableDoubleMeasurement result =
new ObservableDoubleMeasurement() {
@Override
public void observe(double value, Attributes attributes) {
T accumulation =
aggregator.accumulateDoubleMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(double value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} |
386 | 1 | // TODO: Find a way to grab the measurement JUST ONCE for all async metrics. | public static <T> MetricStorage doubleAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableDoubleMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
if (Aggregator.empty() == aggregator) {
return empty();
}
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableDoubleMeasurement result =
new ObservableDoubleMeasurement() {
@Override
public void observe(double value, Attributes attributes) {
T accumulation =
aggregator.accumulateDoubleMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(double value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} | DESIGN | true | }
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableDoubleMeasurement result =
new ObservableDoubleMeasurement() { | InstrumentDescriptor instrument,
Consumer<ObservableDoubleMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
if (Aggregator.empty() == aggregator) {
return empty();
}
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableDoubleMeasurement result =
new ObservableDoubleMeasurement() {
@Override
public void observe(double value, Attributes attributes) {
T accumulation =
aggregator.accumulateDoubleMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
} | public static <T> MetricStorage doubleAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableDoubleMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
if (Aggregator.empty() == aggregator) {
return empty();
}
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableDoubleMeasurement result =
new ObservableDoubleMeasurement() {
@Override
public void observe(double value, Attributes attributes) {
T accumulation =
aggregator.accumulateDoubleMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(double value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} |
387 | 0 | /** Constructs storage for {@code long} valued instruments. */ | public static <T> MetricStorage longAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableLongMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableLongMeasurement result =
new ObservableLongMeasurement() {
@Override
public void observe(long value, Attributes attributes) {
T accumulation =
aggregator.accumulateLongMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(long value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} | NONSATD | true | public static <T> MetricStorage longAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableLongMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableLongMeasurement result =
new ObservableLongMeasurement() {
@Override
public void observe(long value, Attributes attributes) {
T accumulation =
aggregator.accumulateLongMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(long value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} | public static <T> MetricStorage longAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableLongMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableLongMeasurement result =
new ObservableLongMeasurement() {
@Override
public void observe(long value, Attributes attributes) {
T accumulation =
aggregator.accumulateLongMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(long value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} | public static <T> MetricStorage longAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableLongMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableLongMeasurement result =
new ObservableLongMeasurement() {
@Override
public void observe(long value, Attributes attributes) {
T accumulation =
aggregator.accumulateLongMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(long value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} |
387 | 1 | // TODO: Find a way to grab the measurement JUST ONCE for all async metrics. | public static <T> MetricStorage longAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableLongMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableLongMeasurement result =
new ObservableLongMeasurement() {
@Override
public void observe(long value, Attributes attributes) {
T accumulation =
aggregator.accumulateLongMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(long value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} | DESIGN | true | final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableLongMeasurement result =
new ObservableLongMeasurement() { | public static <T> MetricStorage longAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableLongMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableLongMeasurement result =
new ObservableLongMeasurement() {
@Override
public void observe(long value, Attributes attributes) {
T accumulation =
aggregator.accumulateLongMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
} | public static <T> MetricStorage longAsynchronousAccumulator(
View view,
InstrumentDescriptor instrument,
Consumer<ObservableLongMeasurement> metricUpdater) {
final MetricDescriptor metricDescriptor = MetricDescriptor.create(view, instrument);
Aggregator<T> aggregator =
view.getAggregation().createAggregator(instrument, ExemplarFilter.neverSample());
final AsyncAccumulator<T> measurementAccumulator = new AsyncAccumulator<>(instrument);
final AttributesProcessor attributesProcessor = view.getAttributesProcessor();
// TODO: Find a way to grab the measurement JUST ONCE for all async metrics.
final ObservableLongMeasurement result =
new ObservableLongMeasurement() {
@Override
public void observe(long value, Attributes attributes) {
T accumulation =
aggregator.accumulateLongMeasurement(value, attributes, Context.current());
if (accumulation != null) {
measurementAccumulator.record(
attributesProcessor.process(attributes, Context.current()), accumulation);
}
}
@Override
public void observe(long value) {
observe(value, Attributes.empty());
}
};
return new AsynchronousMetricStorage<>(
metricDescriptor, aggregator, measurementAccumulator, () -> metricUpdater.accept(result));
} |
395 | 0 | /**
* Deviation from spec: throws FileSystemNotFoundException if FileSystem
* hasn't yet been initialized. Call newFileSystem() first.
* Need credentials. Maybe set credentials after? how?
* TODO: we can create a new one if the credentials are present by:
* s3://access-key:[email protected]/
*/ | @Override
public Path getPath(URI uri) {
FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
*/
return fileSystem.getPath(uri.getPath());
} | DESIGN | true | @Override
public Path getPath(URI uri) {
FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
*/
return fileSystem.getPath(uri.getPath());
} | @Override
public Path getPath(URI uri) {
FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
*/
return fileSystem.getPath(uri.getPath());
} | @Override
public Path getPath(URI uri) {
FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
*/
return fileSystem.getPath(uri.getPath());
} |
395 | 1 | /**
* TODO: set as a list. one s3FileSystem by region
*/ | @Override
public Path getPath(URI uri) {
FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
*/
return fileSystem.getPath(uri.getPath());
} | IMPLEMENTATION | true | public Path getPath(URI uri) {
FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
*/
return fileSystem.getPath(uri.getPath());
} | @Override
public Path getPath(URI uri) {
FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
*/
return fileSystem.getPath(uri.getPath());
} | @Override
public Path getPath(URI uri) {
FileSystem fileSystem = getFileSystem(uri);
/**
* TODO: set as a list. one s3FileSystem by region
*/
return fileSystem.getPath(uri.getPath());
} |
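The TODO in the record above ("one s3FileSystem by region") points at keeping several filesystem instances keyed by region instead of a single shared one. A minimal sketch of that structure, with hypothetical types and a caller-supplied factory, might be:

// Illustrative sketch only (hypothetical class, not part of the s3fs provider).
import java.nio.file.FileSystem;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

final class RegionFileSystems {
    private final Map<String, FileSystem> byRegion = new ConcurrentHashMap<>();
    private final Function<String, FileSystem> factory;

    RegionFileSystems(Function<String, FileSystem> factory) {
        this.factory = factory;
    }

    FileSystem forRegion(String region) {
        // computeIfAbsent builds the filesystem the first time a region is seen.
        return byRegion.computeIfAbsent(region, factory);
    }
}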
24,980 | 0 | // how about we update the job exec time when the job starts??? | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.DAY_OF_YEAR, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else if (_aggregationDuration == HOURLY_TIME) {
cal.roll(Calendar.HOUR_OF_DAY, false);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.HOUR_OF_DAY, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else {
endDate = cal.getTime().getTime(); // current time
cal.add(Calendar.MINUTE, -1 * _aggregationDuration);
startDate = cal.getTime().getTime();
}
parse(job, startDate, endDate);
if (_runQuota){
try {
_quotaManager.calculateQuotaUsage();
}
catch (Exception e){
s_logger.error("Exception received while calculating quota", e);
}
try {
_quotaStatement.sendStatement();
} catch (Exception e) {
s_logger.error("Exception received while sending statements", e);
}
try {
_alertManager.checkAndSendQuotaAlertEmails();
} catch (Exception e) {
s_logger.error("Exception received while sending alerts", e);
}
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Not owner of usage job, skipping...");
}
}
if (s_logger.isInfoEnabled()) {
s_logger.info("usage job complete");
}
} | DESIGN | true | s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds) | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false); |
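The record above (and the related records that follow, all drawn from the same CloudStack usage-job method) centers on how the aggregation window is computed with Calendar arithmetic: the previous full day or the previous full hour, ending one millisecond before the boundary. For readers following that arithmetic, here is a minimal java.time sketch of the same ranges; it is an illustration under those assumptions, not CloudStack code.

// Illustrative sketch only: previous-full-hour / previous-full-day windows in java.time.
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.temporal.ChronoUnit;

final class AggregationWindow {
    final long startMillis;
    final long endMillis;

    private AggregationWindow(long startMillis, long endMillis) {
        this.startMillis = startMillis;
        this.endMillis = endMillis;
    }

    static AggregationWindow previousFullHour(ZoneId zone) {
        ZonedDateTime hourStart = ZonedDateTime.now(zone).truncatedTo(ChronoUnit.HOURS);
        return new AggregationWindow(
                hourStart.minusHours(1).toInstant().toEpochMilli(),
                hourStart.toInstant().toEpochMilli() - 1);   // end is 1 ms before the boundary
    }

    static AggregationWindow previousFullDay(ZoneId zone) {
        ZonedDateTime dayStart = ZonedDateTime.now(zone).truncatedTo(ChronoUnit.DAYS);
        return new AggregationWindow(
                dayStart.minusDays(1).toInstant().toEpochMilli(),
                dayStart.toInstant().toEpochMilli() - 1);
    }
}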
24,980 | 1 | // 2 second buffer since jobs can run a little early (though usually just by milliseconds) | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.DAY_OF_YEAR, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else if (_aggregationDuration == HOURLY_TIME) {
cal.roll(Calendar.HOUR_OF_DAY, false);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.HOUR_OF_DAY, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else {
endDate = cal.getTime().getTime(); // current time
cal.add(Calendar.MINUTE, -1 * _aggregationDuration);
startDate = cal.getTime().getTime();
}
parse(job, startDate, endDate);
if (_runQuota){
try {
_quotaManager.calculateQuotaUsage();
}
catch (Exception e){
s_logger.error("Exception received while calculating quota", e);
}
try {
_quotaStatement.sendStatement();
} catch (Exception e) {
s_logger.error("Exception received while sending statements", e);
}
try {
_alertManager.checkAndSendQuotaAlertEmails();
} catch (Exception e) {
s_logger.error("Exception received while sending alerts", e);
}
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Not owner of usage job, skipping...");
}
}
if (s_logger.isInfoEnabled()) {
s_logger.info("usage job complete");
}
} | NONSATD | true | // how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0); |
24,980 | 2 | // if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time... | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.DAY_OF_YEAR, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else if (_aggregationDuration == HOURLY_TIME) {
cal.roll(Calendar.HOUR_OF_DAY, false);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.HOUR_OF_DAY, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else {
endDate = cal.getTime().getTime(); // current time
cal.add(Calendar.MINUTE, -1 * _aggregationDuration);
startDate = cal.getTime().getTime();
}
parse(job, startDate, endDate);
if (_runQuota){
try {
_quotaManager.calculateQuotaUsage();
}
catch (Exception e){
s_logger.error("Exception received while calculating quota", e);
}
try {
_quotaStatement.sendStatement();
} catch (Exception e) {
s_logger.error("Exception received while sending statements", e);
}
try {
_alertManager.checkAndSendQuotaAlertEmails();
} catch (Exception e) {
s_logger.error("Exception received while sending alerts", e);
}
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Not owner of usage job, skipping...");
}
}
if (s_logger.isInfoEnabled()) {
s_logger.info("usage job complete");
}
} | NONSATD | true | long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
} | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone); | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime(); |
24,980 | 3 | // FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date. | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.DAY_OF_YEAR, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else if (_aggregationDuration == HOURLY_TIME) {
cal.roll(Calendar.HOUR_OF_DAY, false);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.HOUR_OF_DAY, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else {
endDate = cal.getTime().getTime(); // current time
cal.add(Calendar.MINUTE, -1 * _aggregationDuration);
startDate = cal.getTime().getTime();
}
parse(job, startDate, endDate);
if (_runQuota){
try {
_quotaManager.calculateQuotaUsage();
}
catch (Exception e){
s_logger.error("Exception received while calculating quota", e);
}
try {
_quotaStatement.sendStatement();
} catch (Exception e) {
s_logger.error("Exception received while sending statements", e);
}
try {
_alertManager.checkAndSendQuotaAlertEmails();
} catch (Exception e) {
s_logger.error("Exception received while sending alerts", e);
}
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Not owner of usage job, skipping...");
}
}
if (s_logger.isInfoEnabled()) {
s_logger.info("usage job complete");
}
} | DEFECT | true | UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date()); | // how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0); | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.DAY_OF_YEAR, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else if (_aggregationDuration == HOURLY_TIME) {
cal.roll(Calendar.HOUR_OF_DAY, false);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime(); |
24,980 | 4 | // current time | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.DAY_OF_YEAR, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else if (_aggregationDuration == HOURLY_TIME) {
cal.roll(Calendar.HOUR_OF_DAY, false);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.HOUR_OF_DAY, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else {
endDate = cal.getTime().getTime(); // current time
cal.add(Calendar.MINUTE, -1 * _aggregationDuration);
startDate = cal.getTime().getTime();
}
parse(job, startDate, endDate);
if (_runQuota){
try {
_quotaManager.calculateQuotaUsage();
}
catch (Exception e){
s_logger.error("Exception received while calculating quota", e);
}
try {
_quotaStatement.sendStatement();
} catch (Exception e) {
s_logger.error("Exception received while sending statements", e);
}
try {
_alertManager.checkAndSendQuotaAlertEmails();
} catch (Exception e) {
s_logger.error("Exception received while sending alerts", e);
}
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Not owner of usage job, skipping...");
}
}
if (s_logger.isInfoEnabled()) {
s_logger.info("usage job complete");
}
} | NONSATD | true | // For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date()); | // if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0); | protected void runInContextInternal() {
if (s_logger.isInfoEnabled()) {
s_logger.info("starting usage job...");
}
// how about we update the job exec time when the job starts???
long execTime = _jobExecTime.getTimeInMillis();
long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds)
if (execTime < now) {
// if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result
// of scheduleParse() then don't update the next exec time...
_jobExecTime.add(Calendar.MINUTE, _aggregationDuration);
}
UsageJobVO job = _usageJobDao.isOwner(_hostname, _pid);
if (job != null) {
// FIXME: we really need to do a better job of not missing any events...so we should some how
// keep track of the last time usage was run, then go from there...
// For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous
// full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with
// current time as end date.
Calendar cal = Calendar.getInstance(_usageTimezone);
cal.setTime(new Date());
long startDate = 0;
long endDate = 0;
if (_aggregationDuration == DAILY_TIME) {
cal.roll(Calendar.DAY_OF_YEAR, false);
cal.set(Calendar.HOUR_OF_DAY, 0);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime();
cal.roll(Calendar.DAY_OF_YEAR, true);
cal.add(Calendar.MILLISECOND, -1);
endDate = cal.getTime().getTime();
} else if (_aggregationDuration == HOURLY_TIME) {
cal.roll(Calendar.HOUR_OF_DAY, false);
cal.set(Calendar.MINUTE, 0);
cal.set(Calendar.SECOND, 0);
cal.set(Calendar.MILLISECOND, 0);
startDate = cal.getTime().getTime(); |
24,981 | 0 | // TODO: Shouldn't we also allow parsing by the type of usage? | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | DESIGN | true | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis(); | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
} | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job |
24,981 | 1 | // 1 millisecond after | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) { | public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success); | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
} |
24,981 | 2 | // everything seemed to work...set endDate as the last success date | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now update the user stats in cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage_vm_disk records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now update the vm disk stats in cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job | if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return; | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null; |
24,981 | 3 | // create a new job if this is a recurring job | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now update the user stats in cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage_vm_disk records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now update the vm disk stats in cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | // everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING); | }
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis); | // TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); |
24,981 | 4 | // now update the accounts in the cloud_usage db | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now update the user stats in cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage_vm_disk records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now update the vm disk stats in cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
} | Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) { | if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter); |
24,981 | 5 | // reset offset | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do { | }
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty()); | Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue()); |
24,981 | 6 | // now update the accounts in the cloud_usage db | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
} | Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) { | if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter); |
24,981 | 7 | // reset offset | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do { | }
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty()); | Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue()); |
24,981 | 8 | // now copy the accounts to cloud_usage db | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
} | _usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0); | }
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
} |
24,981 | 9 | // reset offset | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do { | }
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty()); | Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue()); |
24,981 | 10 | // get all the user stats to create usage records for the network usage | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | // reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) { | Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) { | accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do { |
24,981 | 11 | // now copy the accounts to cloud_usage db | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
} | _usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0); | }
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
} |
24,981 | 12 | // reset offset | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do { | }
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty()); | Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue()); |
24,981 | 13 | // now copy the accounts to cloud_usage db | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
} | _usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0); | }
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
} |
24,981 | 14 | // reset offset | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
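// Build a cutoff three days before the window start so stats rows for recently deleted
// resources are still picked up by the aggregation below.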
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
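// Keys have the form "<dcId>-<accountId>-Host-<deviceId>" (e.g. "1-42-Host-7", illustrative
// values only), so traffic is summed per device across all of its public IP addresses.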
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
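// Walk tempDate back from endDate in _aggregationDuration-minute steps; when this loop exits,
// currentEndDate is the earliest aggregation boundary after startDate, and the loop that
// follows advances one aggregation interval per pass until it moves past endDate.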
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do { | }
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty()); | Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue()); |
24,981 | 15 | // get all the vm network stats to create usage_VM_network records for the vm network usage | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
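// Build a cutoff three days before the window start so stats rows for recently deleted
// resources are still picked up by the aggregation below.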
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
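// Keys have the form "<dcId>-<accountId>-Host-<deviceId>" (e.g. "1-42-Host-7", illustrative
// values only), so traffic is summed per device across all of its public IP addresses.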
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
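// Walk tempDate back from endDate in _aggregationDuration-minute steps; when this loop exits,
// currentEndDate is the earliest aggregation boundary after startDate, and the loop that
// follows advances one aggregation interval per pass until it moves past endDate.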
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | // reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) { | Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) { | // now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do { |
24,981 | 16 | // now copy the accounts to cloud_usage db | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
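// Build a cutoff three days before the window start so stats rows for recently deleted
// resources are still picked up by the aggregation below.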
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
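// Keys have the form "<dcId>-<accountId>-Host-<deviceId>" (e.g. "1-42-Host-7", illustrative
// values only), so traffic is summed per device across all of its public IP addresses.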
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
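// Walk tempDate back from endDate in _aggregationDuration-minute steps; when this loop exits,
// currentEndDate is the earliest aggregation boundary after startDate, and the loop that
// follows advances one aggregation interval per pass until it moves past endDate.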
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
} | _usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0); | }
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
} |
24,981 | 17 | // reset offset | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of vm disk stats for an account, across all of its VMs and volumes
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the vm disk stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do { | }
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty()); | Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue()); |
24,981 | 18 | // now copy the accounts to cloud_usage db | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of vm disk stats for an account, across all of its VMs and volumes
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the vm disk stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
} | _usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0); | }
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.updateUserStats(userStats);
} |
24,981 | 19 | // TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of vm disk stats for an account, across all of its VMs and volumes
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the vm disk stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | IMPLEMENTATION | true | userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); | vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime(); | _usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
} |
24,981 | 20 | // make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event) | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More than one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate(); | } finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true); | sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to the cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES); |
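The loops in this listing copy accounts, user statistics and vm disk statistics into the cloud_usage database one page at a time: a Filter with an offset and a limit of 500, advanced inside a do-while that stops on the first empty page. The following minimal, self-contained Java sketch shows that paging idiom in isolation; OffsetLimitPager, fetchPage and copyPage are illustrative names standing in for the DAO search/save calls above and are not CloudStack APIs.

import java.util.ArrayList;
import java.util.List;
import java.util.function.BiFunction;
import java.util.function.Consumer;

// Sketch only: fetchPage and copyPage stand in for the DAO calls used above
// (for example _userStatsDao.search(...) and _usageDao.saveUserStats(...)).
public class OffsetLimitPager {

    // Pages through a source in fixed-size chunks, exactly like the
    // do { search(offset, limit); ...; offset += limit; } while (page not empty) loops above.
    public static <T> long copyAll(BiFunction<Long, Long, List<T>> fetchPage,
                                   Consumer<List<T>> copyPage) {
        long offset = 0L;
        final long limit = 500L;      // same page size the usage parser uses
        long copied = 0L;
        List<T> page;
        do {
            page = fetchPage.apply(offset, limit);
            if (page != null && !page.isEmpty()) {
                copyPage.accept(page);
                copied += page.size();
            }
            offset += limit;          // advance by the page size, not by page.size()
        } while (page != null && !page.isEmpty());
        return copied;
    }

    public static void main(String[] args) {
        // Fake source of 1234 rows so the loop ends after a short last page.
        List<Integer> source = new ArrayList<>();
        for (int i = 0; i < 1234; i++) {
            source.add(i);
        }
        long copied = copyAll(
                (offset, limit) -> source.subList(
                        (int) Math.min(offset, source.size()),
                        (int) Math.min(offset + limit, source.size())),
                page -> { /* stand-in for the _usageDao.save...() call */ });
        System.out.println("copied " + copied + " records");
    }
}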
24,981 | 21 | // - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time millis (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now update the user stats in the cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to the cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage_vm_disk records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now update the vm disk stats in the cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to the cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More than one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true); | try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone); | offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) { |
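The aggregation step above sums every user statistics row that belongs to the same zone, account and device under a single "dcId-accountId-Host-deviceId" key before the usage_network helper entries are written, so traffic seen on all public IPs of a device ends up in one record. A small, self-contained sketch of that pattern follows; NetStat and its field names are hypothetical stand-ins for UserStatisticsVO, not CloudStack types.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch only: NetStat stands in for UserStatisticsVO; the key format mirrors the
// "dcId-accountId-Host-deviceId" hostKey built in the loop above.
public class NetworkStatAggregator {

    static class NetStat {
        long zoneId;
        long accountId;
        long deviceId;
        long bytesSent;
        long bytesReceived;

        NetStat(long zoneId, long accountId, long deviceId, long bytesSent, long bytesReceived) {
            this.zoneId = zoneId;
            this.accountId = accountId;
            this.deviceId = deviceId;
            this.bytesSent = bytesSent;
            this.bytesReceived = bytesReceived;
        }
    }

    // Sums all rows that share a zone/account/device under one aggregate entry.
    static Map<String, NetStat> aggregate(List<NetStat> rows) {
        Map<String, NetStat> byHost = new HashMap<>();
        for (NetStat row : rows) {
            String hostKey = row.zoneId + "-" + row.accountId + "-Host-" + row.deviceId;
            NetStat agg = byHost.get(hostKey);
            if (agg == null) {
                agg = new NetStat(row.zoneId, row.accountId, row.deviceId, 0, 0);
                byHost.put(hostKey, agg);
            }
            agg.bytesSent += row.bytesSent;           // traffic from every public IP of the device
            agg.bytesReceived += row.bytesReceived;
        }
        return byHost;
    }

    public static void main(String[] args) {
        List<NetStat> rows = List.of(
                new NetStat(1, 10, 100, 50, 20),
                new NetStat(1, 10, 100, 25, 5),       // same device, second public IP
                new NetStat(1, 11, 101, 7, 3));
        aggregate(rows).forEach((key, agg) ->
                System.out.println(key + " sent=" + agg.bytesSent + " received=" + agg.bytesReceived));
    }
}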
24,981 | 22 | // TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time millis (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now update the user stats in the cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to the cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage_vm_disk records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now update the vm disk stats in the cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to the cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More than one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | IMPLEMENTATION | true | }
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone); | startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500); | List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
} |
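After the helper tables are filled, parse() processes the data in fixed aggregation windows: it first steps a candidate end date backwards from endDate in _aggregationDuration-minute increments until it is within a minute of startDate, then walks forward one window at a time, parsing the helper tables for every account in each window. The sketch below reproduces that window walk on its own, assuming a hypothetical Window holder class; it is an illustration of the control flow, not the CloudStack implementation.

import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;

// Sketch only: Window is a hypothetical holder; aggregationMinutes plays the role of
// _aggregationDuration and tz the role of _usageTimezone in the listing above.
public class AggregationWindows {

    static class Window {
        final Date start;
        final Date end;
        Window(Date start, Date end) { this.start = start; this.end = end; }
    }

    static List<Window> windows(Date startDate, Date endDate, int aggregationMinutes, TimeZone tz) {
        Calendar cal = Calendar.getInstance(tz);
        Date currentEnd = endDate;
        Date temp = endDate;
        // Backward scan, as in the first while loop above: stop within 60s of startDate.
        while (temp.after(startDate) && (temp.getTime() - startDate.getTime()) > 60000) {
            currentEnd = temp;
            cal.setTime(temp);
            cal.add(Calendar.MINUTE, -aggregationMinutes);
            temp = cal.getTime();
        }
        List<Window> out = new ArrayList<Window>();
        Date currentStart = startDate;
        // Forward walk, mirroring the second while loop above.
        while (!currentEnd.after(endDate) || (currentEnd.getTime() - endDate.getTime() < 60000)) {
            out.add(new Window(currentStart, currentEnd));
            currentStart = new Date(currentEnd.getTime() + 1);
            cal.setTime(currentEnd);
            cal.add(Calendar.MINUTE, aggregationMinutes);
            currentEnd = cal.getTime();
        }
        return out;
    }

    public static void main(String[] args) {
        Date end = new Date();
        Date start = new Date(end.getTime() - 3 * 60 * 60 * 1000L); // three hours of stats
        for (Window w : windows(start, end, 60, TimeZone.getTimeZone("UTC"))) {
            System.out.println(w.start + " -> " + w.end);
        }
    }
}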
24,981 | 23 | // Keep track of user stats for an account, across all of its public IPs | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time millis (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now update the user stats in the cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the user stats to the cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm disk stats to create usage_vm_disk records for the vm disk usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now update the vm disk stats in the cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the vm disk stats to the cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the vm disk stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
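// step back from endDate in _aggregationDuration-minute increments; currentEndDate settles on the
// earliest interval boundary still more than a minute after startDate, and the loop below then
// parses usage one aggregation interval at a time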
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0; | createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) { | Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
} |
24,981 | 24 | // loop over the user stats, create delta entries in the usage_network helper table | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
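// page through active and recently deleted user stats in batches of 500,
// summing sent/received byte counters into a single aggregate per zone-account-device key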
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
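// page through active and recently deleted vm disk stats in batches of 500,
// summing I/O and byte counters into a single aggregate per zone-account-vm-volume key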
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the vm disk stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
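// step back from endDate in _aggregationDuration-minute increments; currentEndDate settles on the
// earliest interval boundary still more than a minute after startDate, and the loop below then
// parses usage one aggregation interval at a time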
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear(); | userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
} | do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do { |
24,981 | 25 | // get vm disk stats in order to compute vm disk usage | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
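// page through active and recently deleted user stats in batches of 500,
// summing sent/received byte counters into a single aggregate per zone-account-device key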
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
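// page through active and recently deleted vm disk stats in batches of 500,
// summing I/O and byte counters into a single aggregate per zone-account-vm-volume key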
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the vm disk stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
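// step back from endDate in _aggregationDuration-minute increments; currentEndDate settles on the
// earliest interval boundary still more than a minute after startDate, and the loop below then
// parses usage one aggregation interval at a time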
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs | if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey = | }
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite()); |
24,981 | 26 | // Keep track of user stats for an account, across all of its public IPs | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
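// page through active and recently deleted user stats in batches of 500,
// summing sent/received byte counters into a single aggregate per zone-account-device key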
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
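// page through active and recently deleted vm disk stats in batches of 500,
// summing I/O and byte counters into a single aggregate per zone-account-vm-volume key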
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskStats != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the vm disk stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskUsages != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0; | createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) { | Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
} |
24,981 | 27 | // loop over the user stats, create delta entries in the usage_disk helper table | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear(); | hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
} | if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate; |
24,981 | 28 | // commit the helper records, then start a new transaction | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start(); | if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate; | }
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter); |
24,981 | 29 | // reset offset | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More than one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do { | }
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty()); | Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue()); |
24,981 | 30 | //mark public templates owned by deleted accounts as deleted | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) { | // reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved()); | parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate); |
24,981 | 31 | // FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors? | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | DESIGN | true | s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback(); | offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date | if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) { |
24,981 | 32 | // everything seemed to work...set endDate as the last success date | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job | if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return; | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null; |
24,981 | 33 | // create a new job if this is a recurring job | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | // everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING); | }
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis); | // TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); |
24,981 | 34 | // switch back to CLOUD_DB | @Override
public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) {
// TODO: Shouldn't we also allow parsing by the type of usage?
boolean success = false;
long timeStart = System.currentTimeMillis();
try {
if ((endDateMillis == 0) || (endDateMillis > timeStart)) {
endDateMillis = timeStart;
}
long lastSuccess = _usageJobDao.getLastJobSuccessDateMillis();
if (lastSuccess != 0) {
startDateMillis = lastSuccess + 1; // 1 millisecond after
}
if (startDateMillis >= endDateMillis) {
if (s_logger.isInfoEnabled()) {
s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")");
}
TransactionLegacy jobUpdateTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
jobUpdateTxn.start();
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
jobUpdateTxn.commit();
} finally {
jobUpdateTxn.close();
}
return;
}
Date startDate = new Date(startDateMillis);
Date endDate = new Date(endDateMillis);
if (s_logger.isInfoEnabled()) {
s_logger.info("Parsing usage records between " + startDate + " and " + endDate);
}
List<AccountVO> accounts = null;
List<UserStatisticsVO> userStats = null;
Map<String, UsageNetworkVO> networkStats = null;
List<VmDiskStatisticsVO> vmDiskStats = null;
Map<String, UsageVmDiskVO> vmDiskUsages = null;
TransactionLegacy userTxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
try {
Long limit = Long.valueOf(500);
Long offset = Long.valueOf(0);
Long lastAccountId = _usageDao.getLastAccountId();
if (lastAccountId == null) {
lastAccountId = Long.valueOf(0);
}
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findActiveAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now update the accounts in the cloud_usage db
_usageDao.updateAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findNewAccounts(lastAccountId, filter);
if ((accounts != null) && !accounts.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveAccounts(accounts);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the user stats to create usage records for the network usage
Long lastUserStatsId = _usageDao.getLastUserStatsId();
if (lastUserStatsId == null) {
lastUserStatsId = Long.valueOf(0);
}
SearchCriteria<UserStatisticsVO> sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc2 = _userStatsDao.createSearchCriteria();
sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId);
do {
Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit);
userStats = _userStatsDao.search(sc2, filter);
if ((userStats != null) && !userStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveUserStats(userStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((userStats != null) && !userStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
// get all the vm network stats to create usage_VM_network records for the vm network usage
Long lastVmDiskStatsId = _usageDao.getLastVmDiskStatsId();
if (lastVmDiskStatsId == null) {
lastVmDiskStatsId = Long.valueOf(0);
}
SearchCriteria<VmDiskStatisticsVO> sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.LTEQ, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.updateVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
// reset offset
offset = Long.valueOf(0);
sc4 = _vmDiskStatsDao.createSearchCriteria();
sc4.addAnd("id", SearchCriteria.Op.GT, lastVmDiskStatsId);
do {
Filter filter = new Filter(VmDiskStatisticsVO.class, "id", true, offset, limit);
vmDiskStats = _vmDiskStatsDao.search(sc4, filter);
if ((vmDiskStats != null) && !vmDiskStats.isEmpty()) {
// now copy the accounts to cloud_usage db
_usageDao.saveVmDiskStats(vmDiskStats);
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((vmDiskStats != null) && !vmDiskStats.isEmpty());
} finally {
userTxn.close();
}
// TODO: Fetch a maximum number of events and process them before moving on to the next range of events
// - get a list of the latest events
// - insert the latest events into the usage.events table
List<UsageEventVO> events = _usageEventDao.getRecentEvents(new Date(endDateMillis));
TransactionLegacy usageTxn = TransactionLegacy.open(TransactionLegacy.USAGE_DB);
try {
usageTxn.start();
// make sure start date is before all of our un-processed events (the events are ordered oldest
// to newest, so just test against the first event)
if ((events != null) && (events.size() > 0)) {
Date oldestEventDate = events.get(0).getCreateDate();
if (oldestEventDate.getTime() < startDateMillis) {
startDateMillis = oldestEventDate.getTime();
startDate = new Date(startDateMillis);
}
// - loop over the list of events and create entries in the helper tables
// - create the usage records using the parse methods below
for (UsageEventVO event : events) {
event.setProcessed(true);
_usageEventDao.update(event.getId(), event);
createHelperRecord(event);
}
}
// TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats
// get user stats in order to compute network usage
networkStats = _usageNetworkDao.getRecentNetworkStats();
Calendar recentlyDeletedCal = Calendar.getInstance(_usageTimezone);
recentlyDeletedCal.setTimeInMillis(startDateMillis);
recentlyDeletedCal.add(Calendar.MINUTE, -1 * THREE_DAYS_IN_MINUTES);
Date recentlyDeletedDate = recentlyDeletedCal.getTime();
// Keep track of user stats for an account, across all of its public IPs
Map<String, UserStatisticsVO> aggregatedStats = new HashMap<String, UserStatisticsVO>();
int startIndex = 0;
do {
userStats = _userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (userStats != null) {
for (UserStatisticsVO userStat : userStats) {
if (userStat.getDeviceId() != null) {
String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId() + "-Host-" + userStat.getDeviceId();
UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), userStat.getDeviceId(),
userStat.getDeviceType(), userStat.getNetworkId());
}
hostAggregatedStat.setAggBytesSent(hostAggregatedStat.getAggBytesSent() + userStat.getAggBytesSent());
hostAggregatedStat.setAggBytesReceived(hostAggregatedStat.getAggBytesReceived() + userStat.getAggBytesReceived());
aggregatedStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_network helper table
int numAcctsProcessed = 0;
usageNetworks.clear();
for (String key : aggregatedStats.keySet()) {
UsageNetworkVO currentNetworkStats = null;
if (networkStats != null) {
currentNetworkStats = networkStats.get(key);
}
createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis);
numAcctsProcessed++;
}
_usageNetworkDao.saveUsageNetworks(usageNetworks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts");
}
// get vm disk stats in order to compute vm disk usage
vmDiskUsages = _usageVmDiskDao.getRecentVmDiskStats();
// Keep track of user stats for an account, across all of its public IPs
Map<String, VmDiskStatisticsVO> aggregatedDiskStats = new HashMap<String, VmDiskStatisticsVO>();
startIndex = 0;
do {
vmDiskStats = _vmDiskStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500);
if (vmDiskUsages != null) {
for (VmDiskStatisticsVO vmDiskStat : vmDiskStats) {
if (vmDiskStat.getVmId() != null) {
String hostKey =
vmDiskStat.getDataCenterId() + "-" + vmDiskStat.getAccountId() + "-Vm-" + vmDiskStat.getVmId() + "-Disk-" + vmDiskStat.getVolumeId();
VmDiskStatisticsVO hostAggregatedStat = aggregatedDiskStats.get(hostKey);
if (hostAggregatedStat == null) {
hostAggregatedStat =
new VmDiskStatisticsVO(vmDiskStat.getAccountId(), vmDiskStat.getDataCenterId(), vmDiskStat.getVmId(), vmDiskStat.getVolumeId());
}
hostAggregatedStat.setAggIORead(hostAggregatedStat.getAggIORead() + vmDiskStat.getAggIORead());
hostAggregatedStat.setAggIOWrite(hostAggregatedStat.getAggIOWrite() + vmDiskStat.getAggIOWrite());
hostAggregatedStat.setAggBytesRead(hostAggregatedStat.getAggBytesRead() + vmDiskStat.getAggBytesRead());
hostAggregatedStat.setAggBytesWrite(hostAggregatedStat.getAggBytesWrite() + vmDiskStat.getAggBytesWrite());
aggregatedDiskStats.put(hostKey, hostAggregatedStat);
}
}
}
startIndex += 500;
} while ((userStats != null) && !userStats.isEmpty());
// loop over the user stats, create delta entries in the usage_disk helper table
numAcctsProcessed = 0;
usageVmDisks.clear();
for (String key : aggregatedDiskStats.keySet()) {
UsageVmDiskVO currentVmDiskStats = null;
if (vmDiskStats != null) {
currentVmDiskStats = vmDiskUsages.get(key);
}
createVmDiskHelperEntry(aggregatedDiskStats.get(key), currentVmDiskStats, endDateMillis);
numAcctsProcessed++;
}
_usageVmDiskDao.saveUsageVmDisks(usageVmDisks);
if (s_logger.isDebugEnabled()) {
s_logger.debug("created vm disk stats helper entries for " + numAcctsProcessed + " accts");
}
// commit the helper records, then start a new transaction
usageTxn.commit();
usageTxn.start();
boolean parsed = false;
numAcctsProcessed = 0;
Date currentStartDate = startDate;
Date currentEndDate = endDate;
Date tempDate = endDate;
Calendar aggregateCal = Calendar.getInstance(_usageTimezone);
while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)) {
currentEndDate = tempDate;
aggregateCal.setTime(tempDate);
aggregateCal.add(Calendar.MINUTE, -_aggregationDuration);
tempDate = aggregateCal.getTime();
}
while (!currentEndDate.after(endDate) || (currentEndDate.getTime() - endDate.getTime() < 60000)) {
Long offset = Long.valueOf(0);
Long limit = Long.valueOf(500);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.listAll(filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts");
}
numAcctsProcessed = 0;
// reset offset
offset = Long.valueOf(0);
do {
Filter filter = new Filter(AccountVO.class, "id", true, offset, limit);
accounts = _accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter);
if ((accounts != null) && !accounts.isEmpty()) {
for (AccountVO account : accounts) {
parsed = parseHelperTables(account, currentStartDate, currentEndDate);
List<Long> publicTemplates = _usageDao.listPublicTemplatesByAccount(account.getId());
for (Long templateId : publicTemplates) {
//mark public templates owned by deleted accounts as deleted
List<UsageStorageVO> storageVOs = _usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE);
if (storageVOs.size() > 1) {
s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() +
"; marking them all as deleted...");
}
for (UsageStorageVO storageVO : storageVOs) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId());
}
storageVO.setDeleted(account.getRemoved());
_usageStorageDao.update(storageVO);
}
}
numAcctsProcessed++;
}
}
offset = new Long(offset.longValue() + limit.longValue());
} while ((accounts != null) && !accounts.isEmpty());
currentStartDate = new Date(currentEndDate.getTime() + 1);
aggregateCal.setTime(currentEndDate);
aggregateCal.add(Calendar.MINUTE, _aggregationDuration);
currentEndDate = aggregateCal.getTime();
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts");
}
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} | NONSATD | true | usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) { | usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) { | }
// FIXME: we don't break the above loop if something fails to parse, so it gets reset every account,
// do we want to break out of processing accounts and rollback if there are errors?
if (!parsed) {
usageTxn.rollback();
} else {
success = true;
}
} catch (Exception ex) {
s_logger.error("Exception in usage manager", ex);
usageTxn.rollback();
} finally {
// everything seemed to work...set endDate as the last success date
_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success);
// create a new job if this is a recurring job
if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) {
_usageJobDao.createNewJob(_hostname, _pid, UsageJobVO.JOB_TYPE_RECURRING);
}
usageTxn.commit();
usageTxn.close();
// switch back to CLOUD_DB
TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB);
if (!success) {
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: " + job.getId(),
"Usage job failed. Job id: " + job.getId());
} else {
_alertMgr.clearAlert(AlertManager.AlertType.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0);
}
swap.close();
}
} catch (Exception e) {
s_logger.error("Usage Manager error", e);
}
} |
33,175 | 0 | //FIXME: do this without the suppresswarnings tag | @SuppressWarnings("unchecked")
public static <T> T[] toArray(List<T> list){
return (T[]) list.toArray();
} | DESIGN | true | @SuppressWarnings("unchecked")
public static <T> T[] toArray(List<T> list){
return (T[]) list.toArray();
} | @SuppressWarnings("unchecked")
public static <T> T[] toArray(List<T> list){
return (T[]) list.toArray();
} | @SuppressWarnings("unchecked")
public static <T> T[] toArray(List<T> list){
return (T[]) list.toArray();
} |
408 | 0 | // Chrome sucks.
// http://dev.equella.com/issues/8025
// http://dev.equella.com/issues/5612 | @SuppressWarnings("nls")
@Override
protected FilterResult doFilterInternal(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
response = new IgnoreContentWrapper(response);
response.addHeader("P3P", "CP=\"CAO PSA OUR\"");
response.setHeader("X-Content-Type-Options", "nosniff");
// Chrome sucks.
// http://dev.equella.com/issues/8025
// http://dev.equella.com/issues/5612
String ua = request.getHeader("User-Agent");
if (ua != null && ua.contains("Chrome")) {
response.addHeader("X-XSS-Protection", "0");
} else {
response.setHeader("X-XSS-Protection", "1; mode=block");
}
if (stsMaxAge != -1) {
response.setHeader(
"Strict-Transport-Security", "max-age=" + stsMaxAge + "; includeSubDomains");
}
return new FilterResult(response);
} | DEFECT | true | response.addHeader("P3P", "CP=\"CAO PSA OUR\"");
response.setHeader("X-Content-Type-Options", "nosniff");
// Chrome sucks.
// http://dev.equella.com/issues/8025
// http://dev.equella.com/issues/5612
String ua = request.getHeader("User-Agent");
if (ua != null && ua.contains("Chrome")) { | @SuppressWarnings("nls")
@Override
protected FilterResult doFilterInternal(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
response = new IgnoreContentWrapper(response);
response.addHeader("P3P", "CP=\"CAO PSA OUR\"");
response.setHeader("X-Content-Type-Options", "nosniff");
// Chrome sucks.
// http://dev.equella.com/issues/8025
// http://dev.equella.com/issues/5612
String ua = request.getHeader("User-Agent");
if (ua != null && ua.contains("Chrome")) {
response.addHeader("X-XSS-Protection", "0");
} else {
response.setHeader("X-XSS-Protection", "1; mode=block");
}
if (stsMaxAge != -1) {
response.setHeader(
"Strict-Transport-Security", "max-age=" + stsMaxAge + "; includeSubDomains");
} | @SuppressWarnings("nls")
@Override
protected FilterResult doFilterInternal(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
response = new IgnoreContentWrapper(response);
response.addHeader("P3P", "CP=\"CAO PSA OUR\"");
response.setHeader("X-Content-Type-Options", "nosniff");
// Chrome sucks.
// http://dev.equella.com/issues/8025
// http://dev.equella.com/issues/5612
String ua = request.getHeader("User-Agent");
if (ua != null && ua.contains("Chrome")) {
response.addHeader("X-XSS-Protection", "0");
} else {
response.setHeader("X-XSS-Protection", "1; mode=block");
}
if (stsMaxAge != -1) {
response.setHeader(
"Strict-Transport-Security", "max-age=" + stsMaxAge + "; includeSubDomains");
}
return new FilterResult(response);
} |
16,797 | 0 | /**
* Returns a new test listener.
* @param ads should RDS for the listener be configured to use XDS?
* @param rdsTransportVersion the transport_api_version that should be set for RDS
* @param rdsResourceVersion the resource_api_version that should be set for RDS
* @param listenerName name of the new listener
* @param port port to use for the listener
* @param routeName name of the test route that is associated with this listener
*/ | public static Listener createListener(boolean ads,
io.envoyproxy.envoy.api.v2.core.ApiVersion rdsTransportVersion,
io.envoyproxy.envoy.api.v2.core.ApiVersion rdsResourceVersion, String listenerName,
int port, String routeName) {
ConfigSource.Builder configSourceBuilder = ConfigSource.newBuilder()
.setResourceApiVersion(rdsResourceVersion);
ConfigSource rdsSource = ads
? configSourceBuilder
.setAds(AggregatedConfigSource.getDefaultInstance())
.build()
: configSourceBuilder
.setApiConfigSource(ApiConfigSource.newBuilder()
.setApiType(ApiType.GRPC)
.setTransportApiVersion(rdsTransportVersion)
.addGrpcServices(GrpcService.newBuilder()
.setEnvoyGrpc(EnvoyGrpc.newBuilder()
.setClusterName(XDS_CLUSTER))))
.build();
HttpConnectionManager manager = HttpConnectionManager.newBuilder()
.setCodecType(CodecType.AUTO)
.setStatPrefix("http")
.setRds(Rds.newBuilder()
.setConfigSource(rdsSource)
.setRouteConfigName(routeName))
.addHttpFilters(HttpFilter.newBuilder()
.setName(Resources.FILTER_ENVOY_ROUTER))
.build();
return Listener.newBuilder()
.setName(listenerName)
.setAddress(Address.newBuilder()
.setSocketAddress(SocketAddress.newBuilder()
.setAddress(ANY_ADDRESS)
.setPortValue(port)
.setProtocol(Protocol.TCP)))
.addFilterChains(FilterChain.newBuilder()
.addFilters(Filter.newBuilder()
.setName(Resources.FILTER_HTTP_CONNECTION_MANAGER)
.setTypedConfig(Any.pack(manager))))
.build();
} | NONSATD | true | public static Listener createListener(boolean ads,
io.envoyproxy.envoy.api.v2.core.ApiVersion rdsTransportVersion,
io.envoyproxy.envoy.api.v2.core.ApiVersion rdsResourceVersion, String listenerName,
int port, String routeName) {
ConfigSource.Builder configSourceBuilder = ConfigSource.newBuilder()
.setResourceApiVersion(rdsResourceVersion);
ConfigSource rdsSource = ads
? configSourceBuilder
.setAds(AggregatedConfigSource.getDefaultInstance())
.build()
: configSourceBuilder
.setApiConfigSource(ApiConfigSource.newBuilder()
.setApiType(ApiType.GRPC)
.setTransportApiVersion(rdsTransportVersion)
.addGrpcServices(GrpcService.newBuilder()
.setEnvoyGrpc(EnvoyGrpc.newBuilder()
.setClusterName(XDS_CLUSTER))))
.build();
HttpConnectionManager manager = HttpConnectionManager.newBuilder()
.setCodecType(CodecType.AUTO)
.setStatPrefix("http")
.setRds(Rds.newBuilder()
.setConfigSource(rdsSource)
.setRouteConfigName(routeName))
.addHttpFilters(HttpFilter.newBuilder()
.setName(Resources.FILTER_ENVOY_ROUTER))
.build();
return Listener.newBuilder()
.setName(listenerName)
.setAddress(Address.newBuilder()
.setSocketAddress(SocketAddress.newBuilder()
.setAddress(ANY_ADDRESS)
.setPortValue(port)
.setProtocol(Protocol.TCP)))
.addFilterChains(FilterChain.newBuilder()
.addFilters(Filter.newBuilder()
.setName(Resources.FILTER_HTTP_CONNECTION_MANAGER)
.setTypedConfig(Any.pack(manager))))
.build();
} | public static Listener createListener(boolean ads,
io.envoyproxy.envoy.api.v2.core.ApiVersion rdsTransportVersion,
io.envoyproxy.envoy.api.v2.core.ApiVersion rdsResourceVersion, String listenerName,
int port, String routeName) {
ConfigSource.Builder configSourceBuilder = ConfigSource.newBuilder()
.setResourceApiVersion(rdsResourceVersion);
ConfigSource rdsSource = ads
? configSourceBuilder
.setAds(AggregatedConfigSource.getDefaultInstance())
.build()
: configSourceBuilder
.setApiConfigSource(ApiConfigSource.newBuilder()
.setApiType(ApiType.GRPC)
.setTransportApiVersion(rdsTransportVersion)
.addGrpcServices(GrpcService.newBuilder()
.setEnvoyGrpc(EnvoyGrpc.newBuilder()
.setClusterName(XDS_CLUSTER))))
.build();
HttpConnectionManager manager = HttpConnectionManager.newBuilder()
.setCodecType(CodecType.AUTO)
.setStatPrefix("http")
.setRds(Rds.newBuilder()
.setConfigSource(rdsSource)
.setRouteConfigName(routeName))
.addHttpFilters(HttpFilter.newBuilder()
.setName(Resources.FILTER_ENVOY_ROUTER))
.build();
return Listener.newBuilder()
.setName(listenerName)
.setAddress(Address.newBuilder()
.setSocketAddress(SocketAddress.newBuilder()
.setAddress(ANY_ADDRESS)
.setPortValue(port)
.setProtocol(Protocol.TCP)))
.addFilterChains(FilterChain.newBuilder()
.addFilters(Filter.newBuilder()
.setName(Resources.FILTER_HTTP_CONNECTION_MANAGER)
.setTypedConfig(Any.pack(manager))))
.build();
} | public static Listener createListener(boolean ads,
io.envoyproxy.envoy.api.v2.core.ApiVersion rdsTransportVersion,
io.envoyproxy.envoy.api.v2.core.ApiVersion rdsResourceVersion, String listenerName,
int port, String routeName) {
ConfigSource.Builder configSourceBuilder = ConfigSource.newBuilder()
.setResourceApiVersion(rdsResourceVersion);
ConfigSource rdsSource = ads
? configSourceBuilder
.setAds(AggregatedConfigSource.getDefaultInstance())
.build()
: configSourceBuilder
.setApiConfigSource(ApiConfigSource.newBuilder()
.setApiType(ApiType.GRPC)
.setTransportApiVersion(rdsTransportVersion)
.addGrpcServices(GrpcService.newBuilder()
.setEnvoyGrpc(EnvoyGrpc.newBuilder()
.setClusterName(XDS_CLUSTER))))
.build();
HttpConnectionManager manager = HttpConnectionManager.newBuilder()
.setCodecType(CodecType.AUTO)
.setStatPrefix("http")
.setRds(Rds.newBuilder()
.setConfigSource(rdsSource)
.setRouteConfigName(routeName))
.addHttpFilters(HttpFilter.newBuilder()
.setName(Resources.FILTER_ENVOY_ROUTER))
.build();
return Listener.newBuilder()
.setName(listenerName)
.setAddress(Address.newBuilder()
.setSocketAddress(SocketAddress.newBuilder()
.setAddress(ANY_ADDRESS)
.setPortValue(port)
.setProtocol(Protocol.TCP)))
.addFilterChains(FilterChain.newBuilder()
.addFilters(Filter.newBuilder()
.setName(Resources.FILTER_HTTP_CONNECTION_MANAGER)
.setTypedConfig(Any.pack(manager))))
.build();
} |
8,611 | 0 | // differentiate clones in this group | @Override
public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} | NONSATD | true | public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){ | @Override
public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute(); | @Override
public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} |
8,611 | 1 | // if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison | @Override
public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} | DESIGN | true | // differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1); | @Override
public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} | @Override
public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} |
8,611 | 2 | // } | @Override
public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} | NONSATD | true | comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} | GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} | @Override
public void actionPerformed(ActionEvent e) {
if(currSel!= null && currSel instanceof GroupTreeNode){
// differentiate clones in this group
GroupTreeNode gtn = (GroupTreeNode)currSel;
// if(gtn.getChildCount() == 2){
// TODO: currently we only support two way comparison
CloneTreeNode ctn1 = (CloneTreeNode)gtn.getChildAt(0);
CloneTreeNode ctn2 = (CloneTreeNode)gtn.getChildAt(1);
String file1 = ctn1.getFile();
String file2 = ctn2.getFile();
CloneComparison comparison = new CloneComparison(panel, new File(file1), new File(file2), ctn1.start, ctn1.end, ctn2.start, ctn2.end);
comparison.setOpenInBackground(false);
comparison.execute();
// }
}
} |
24,996 | 0 | /**
* The following actions are performed:
*
* <ul>
* <li>Set fieldBindModelPath to the collection model path (since the fields
* have to belong to the same model as the collection)</li>
* <li>Set defaults for binding</li>
* <li>Calls view helper service to initialize prototypes</li>
* </ul>
*/ | @Override
public void performInitialization(Object model) {
setFieldBindingObjectPath(getBindingInfo().getBindingObjectPath());
super.performInitialization(model);
if (bindingInfo != null) {
bindingInfo.setDefaults(ViewLifecycle.getActiveLifecycle().getView(), getPropertyName());
}
// TODO: set object path for prototypes equal to the tree group object path?
} | NONSATD | true | @Override
public void performInitialization(Object model) {
setFieldBindingObjectPath(getBindingInfo().getBindingObjectPath());
super.performInitialization(model);
if (bindingInfo != null) {
bindingInfo.setDefaults(ViewLifecycle.getActiveLifecycle().getView(), getPropertyName());
}
// TODO: set object path for prototypes equal to the tree group object path?
} | @Override
public void performInitialization(Object model) {
setFieldBindingObjectPath(getBindingInfo().getBindingObjectPath());
super.performInitialization(model);
if (bindingInfo != null) {
bindingInfo.setDefaults(ViewLifecycle.getActiveLifecycle().getView(), getPropertyName());
}
// TODO: set object path for prototypes equal to the tree group object path?
} | @Override
public void performInitialization(Object model) {
setFieldBindingObjectPath(getBindingInfo().getBindingObjectPath());
super.performInitialization(model);
if (bindingInfo != null) {
bindingInfo.setDefaults(ViewLifecycle.getActiveLifecycle().getView(), getPropertyName());
}
// TODO: set object path for prototypes equal to the tree group object path?
} |
24,996 | 1 | // TODO: set object path for prototypes equal to the tree group object path? | @Override
public void performInitialization(Object model) {
setFieldBindingObjectPath(getBindingInfo().getBindingObjectPath());
super.performInitialization(model);
if (bindingInfo != null) {
bindingInfo.setDefaults(ViewLifecycle.getActiveLifecycle().getView(), getPropertyName());
}
// TODO: set object path for prototypes equal to the tree group object path?
} | IMPLEMENTATION | true | bindingInfo.setDefaults(ViewLifecycle.getActiveLifecycle().getView(), getPropertyName());
}
// TODO: set object path for prototypes equal to the tree group object path?
} | @Override
public void performInitialization(Object model) {
setFieldBindingObjectPath(getBindingInfo().getBindingObjectPath());
super.performInitialization(model);
if (bindingInfo != null) {
bindingInfo.setDefaults(ViewLifecycle.getActiveLifecycle().getView(), getPropertyName());
}
// TODO: set object path for prototypes equal to the tree group object path?
} | @Override
public void performInitialization(Object model) {
setFieldBindingObjectPath(getBindingInfo().getBindingObjectPath());
super.performInitialization(model);
if (bindingInfo != null) {
bindingInfo.setDefaults(ViewLifecycle.getActiveLifecycle().getView(), getPropertyName());
}
// TODO: set object path for prototypes equal to the tree group object path?
} |
16,806 | 0 | // Inflate the layout for this fragment | @Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
Bundle args = getArguments();
TierCategory category = (TierCategory) args.getSerializable("category");
String tier = args.getString("tier");
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_tier_list, container, false);
LinearLayout layout = (LinearLayout) view.findViewById(R.id.tier_layout);
try {//TODO: see if we can be more error-tolerance
myTask = new PopulateTierTableAsyncTask(getActivity(), layout, category).execute(tier);
} catch (Exception e) {
e.printStackTrace();
}
return view;
} | NONSATD | true | TierCategory category = (TierCategory) args.getSerializable("category");
String tier = args.getString("tier");
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_tier_list, container, false);
LinearLayout layout = (LinearLayout) view.findViewById(R.id.tier_layout); | @Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
Bundle args = getArguments();
TierCategory category = (TierCategory) args.getSerializable("category");
String tier = args.getString("tier");
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_tier_list, container, false);
LinearLayout layout = (LinearLayout) view.findViewById(R.id.tier_layout);
try {//TODO: see if we can be more error-tolerance
myTask = new PopulateTierTableAsyncTask(getActivity(), layout, category).execute(tier);
} catch (Exception e) {
e.printStackTrace();
}
return view;
} | @Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
Bundle args = getArguments();
TierCategory category = (TierCategory) args.getSerializable("category");
String tier = args.getString("tier");
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_tier_list, container, false);
LinearLayout layout = (LinearLayout) view.findViewById(R.id.tier_layout);
try {//TODO: see if we can be more error-tolerance
myTask = new PopulateTierTableAsyncTask(getActivity(), layout, category).execute(tier);
} catch (Exception e) {
e.printStackTrace();
}
return view;
} |
16,806 | 1 | //TODO: see if we can be more error-tolerance | @Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
Bundle args = getArguments();
TierCategory category = (TierCategory) args.getSerializable("category");
String tier = args.getString("tier");
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_tier_list, container, false);
LinearLayout layout = (LinearLayout) view.findViewById(R.id.tier_layout);
try {//TODO: see if we can be more error-tolerance
myTask = new PopulateTierTableAsyncTask(getActivity(), layout, category).execute(tier);
} catch (Exception e) {
e.printStackTrace();
}
return view;
} | DESIGN | true | View view = inflater.inflate(R.layout.fragment_tier_list, container, false);
LinearLayout layout = (LinearLayout) view.findViewById(R.id.tier_layout);
try {//TODO: see if we can be more error-tolerance
myTask = new PopulateTierTableAsyncTask(getActivity(), layout, category).execute(tier);
} catch (Exception e) { | @Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
Bundle args = getArguments();
TierCategory category = (TierCategory) args.getSerializable("category");
String tier = args.getString("tier");
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_tier_list, container, false);
LinearLayout layout = (LinearLayout) view.findViewById(R.id.tier_layout);
try {//TODO: see if we can be more error-tolerance
myTask = new PopulateTierTableAsyncTask(getActivity(), layout, category).execute(tier);
} catch (Exception e) {
e.printStackTrace();
}
return view;
} | @Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
Bundle args = getArguments();
TierCategory category = (TierCategory) args.getSerializable("category");
String tier = args.getString("tier");
// Inflate the layout for this fragment
View view = inflater.inflate(R.layout.fragment_tier_list, container, false);
LinearLayout layout = (LinearLayout) view.findViewById(R.id.tier_layout);
try {//TODO: see if we can be more error-tolerance
myTask = new PopulateTierTableAsyncTask(getActivity(), layout, category).execute(tier);
} catch (Exception e) {
e.printStackTrace();
}
return view;
} |
16,807 | 0 | // instead of try-catch | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable"); | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex); | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else { |
16,807 | 1 | // calculate the width of the images to be displayed later on | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | }
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point(); | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0; | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView); |
16,807 | 2 | // set it to be 1/10 of the screen width | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex); | protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue; | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact |
16,807 | 3 | // get all rows in each table | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) { | Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity); | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView); |
16,807 | 4 | // row 1 is the column headers. This may be different in the DOM in browser | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
} | int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src"); | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical |
16,807 | 5 | // get the thubnail image src | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src"); | for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity); | Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag()); |
16,807 | 6 | // the height's not exact | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it | continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr); | int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
} |
16,807 | 7 | // get the scaled image link and display it | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView); | else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() { | Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table); |
16,807 | 8 | //center vertical | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | NONSATD | true | tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName); | if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
}); | // row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} |
16,807 | 9 | //TODO: center the spinner horizontally
//remove the spinner | @Override
protected void onPostExecute(Void param) {
if (pageDOM == null) {
return; // instead of try-catch
}
Elements tierTables = pageDOM.getElementsByClass("wikitable");
// calculate the width of the images to be displayed later on
Display display = activity.getWindowManager().getDefaultDisplay();
Point size = new Point();
display.getSize(size);
int screenWidth = size.x;
int scaleWidth = screenWidth / 10; // set it to be 1/10 of the screen width
int tableIndex = tierMap.get(category).get(tier);
Element tierTable = tierTables.get(tableIndex);
TableLayout table = new TableLayout(activity);
Elements rows = tierTable.getElementsByTag("tbody").first().getElementsByTag("tr"); // get all rows in each table
int countRow = 0;
for (Element row : rows) {
countRow++;
if (countRow == 1) {
// row 1 is the column headers. This may be different in the DOM in browser
continue;
}
else {
Elements cells = row.getElementsByTag("td");
TableRow tr = new TableRow(activity);
ImageView imgView = new ImageView(activity); tr.addView(imgView);
// get the thubnail image src
Element link = row.getElementsByTag("a").first();
String imgSrc = link.getElementsByTag("img").first().attr("data-src");
if (imgSrc == null || imgSrc.equals("")) imgSrc = link.getElementsByTag("img").first().attr("src");
imgView.setLayoutParams(new TableRow.LayoutParams(scaleWidth, (int) (scaleWidth*1.5))); // the height's not exact
imgView.setScaleType(ImageView.ScaleType.FIT_CENTER);
// get the scaled image link and display it
String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | IMPLEMENTATION | true | }
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar); | @Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} | String newScaledLink = Util.getScaledWikiaImageLink(imgSrc, scaleWidth);
ImageLoader.getInstance().displayImage(newScaledLink, imgView);
String famName = cells.get(2).text();
TextView tv = new TextView(activity);
tv.setText(famName);
tr.addView(tv);
tr.setGravity(0x10); //center vertical
table.addView(tr);
tr.setTag(famName);
tr.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(activity, FamDetailActivity.class);
intent.putExtra(MainActivity.FAM_NAME, (String) v.getTag());
activity.startActivity(intent);
}
});
}
}
layout.addView(table);
//TODO: center the spinner horizontally
//remove the spinner
ProgressBar progressBar = (ProgressBar) activity.findViewById(R.id.progressBar_tierTable);
layout.removeView(progressBar);
} |
16,808 | 0 | // TODO For now we only sync cloud ids during full sync. We should eventually allow more granular syncs (actor level and group level sync). | public SyncStatusDetail synchronizeStack(Stack stack, UmsUsersState umsUsersState, UserSyncOptions options) {
MDCBuilder.buildMdcContext(stack);
String environmentCrn = stack.getEnvironmentCrn();
Multimap<String, String> warnings = ArrayListMultimap.create();
try {
FreeIpaClient freeIpaClient = freeIpaClientFactory.getFreeIpaClientForStack(stack);
UsersStateDifference usersStateDifferenceBeforeSync = compareUmsAndFreeIpa(umsUsersState, options, freeIpaClient);
stateApplier.applyDifference(umsUsersState, environmentCrn, warnings, usersStateDifferenceBeforeSync, options, freeIpaClient);
retrySyncIfBatchCallHasWarnings(stack, umsUsersState, warnings, options, freeIpaClient, usersStateDifferenceBeforeSync);
if (options.isFullSync()) {
// TODO For now we only sync cloud ids during full sync. We should eventually allow more granular syncs (actor level and group level sync).
if (entitlementService.cloudIdentityMappingEnabled(stack.getAccountId())) {
LOGGER.debug("Starting {} ...", SYNC_CLOUD_IDENTITIES);
cloudIdentitySyncService.syncCloudIdentities(stack, umsUsersState, warnings::put);
LOGGER.debug("Finished {}.", SYNC_CLOUD_IDENTITIES);
}
if (entitlementService.isEnvironmentPrivilegedUserEnabled(stack.getAccountId())) {
LOGGER.debug("Starting {} ...", ADD_SUDO_RULES);
try {
sudoRuleService.setupSudoRule(stack, freeIpaClient);
} catch (Exception e) {
warnings.put(stack.getEnvironmentCrn(), e.getMessage());
LOGGER.error("{} failed for environment '{}'.", ADD_SUDO_RULES, stack.getEnvironmentCrn(), e);
}
LOGGER.debug("Finished {}.", ADD_SUDO_RULES);
}
}
return toSyncStatusDetail(environmentCrn, warnings);
} catch (TimeoutException e) {
LOGGER.warn("Timed out while synchronizing environment {}", environmentCrn, e);
return SyncStatusDetail.fail(environmentCrn, "Timed out", warnings);
} catch (Exception e) {
LOGGER.warn("Failed to synchronize environment {}", environmentCrn, e);
return SyncStatusDetail.fail(environmentCrn, e.getLocalizedMessage(), warnings);
}
} | IMPLEMENTATION | true | retrySyncIfBatchCallHasWarnings(stack, umsUsersState, warnings, options, freeIpaClient, usersStateDifferenceBeforeSync);
if (options.isFullSync()) {
// TODO For now we only sync cloud ids during full sync. We should eventually allow more granular syncs (actor level and group level sync).
if (entitlementService.cloudIdentityMappingEnabled(stack.getAccountId())) {
LOGGER.debug("Starting {} ...", SYNC_CLOUD_IDENTITIES); | public SyncStatusDetail synchronizeStack(Stack stack, UmsUsersState umsUsersState, UserSyncOptions options) {
MDCBuilder.buildMdcContext(stack);
String environmentCrn = stack.getEnvironmentCrn();
Multimap<String, String> warnings = ArrayListMultimap.create();
try {
FreeIpaClient freeIpaClient = freeIpaClientFactory.getFreeIpaClientForStack(stack);
UsersStateDifference usersStateDifferenceBeforeSync = compareUmsAndFreeIpa(umsUsersState, options, freeIpaClient);
stateApplier.applyDifference(umsUsersState, environmentCrn, warnings, usersStateDifferenceBeforeSync, options, freeIpaClient);
retrySyncIfBatchCallHasWarnings(stack, umsUsersState, warnings, options, freeIpaClient, usersStateDifferenceBeforeSync);
if (options.isFullSync()) {
// TODO For now we only sync cloud ids during full sync. We should eventually allow more granular syncs (actor level and group level sync).
if (entitlementService.cloudIdentityMappingEnabled(stack.getAccountId())) {
LOGGER.debug("Starting {} ...", SYNC_CLOUD_IDENTITIES);
cloudIdentitySyncService.syncCloudIdentities(stack, umsUsersState, warnings::put);
LOGGER.debug("Finished {}.", SYNC_CLOUD_IDENTITIES);
}
if (entitlementService.isEnvironmentPrivilegedUserEnabled(stack.getAccountId())) {
LOGGER.debug("Starting {} ...", ADD_SUDO_RULES);
try {
sudoRuleService.setupSudoRule(stack, freeIpaClient);
} catch (Exception e) { | public SyncStatusDetail synchronizeStack(Stack stack, UmsUsersState umsUsersState, UserSyncOptions options) {
MDCBuilder.buildMdcContext(stack);
String environmentCrn = stack.getEnvironmentCrn();
Multimap<String, String> warnings = ArrayListMultimap.create();
try {
FreeIpaClient freeIpaClient = freeIpaClientFactory.getFreeIpaClientForStack(stack);
UsersStateDifference usersStateDifferenceBeforeSync = compareUmsAndFreeIpa(umsUsersState, options, freeIpaClient);
stateApplier.applyDifference(umsUsersState, environmentCrn, warnings, usersStateDifferenceBeforeSync, options, freeIpaClient);
retrySyncIfBatchCallHasWarnings(stack, umsUsersState, warnings, options, freeIpaClient, usersStateDifferenceBeforeSync);
if (options.isFullSync()) {
// TODO For now we only sync cloud ids during full sync. We should eventually allow more granular syncs (actor level and group level sync).
if (entitlementService.cloudIdentityMappingEnabled(stack.getAccountId())) {
LOGGER.debug("Starting {} ...", SYNC_CLOUD_IDENTITIES);
cloudIdentitySyncService.syncCloudIdentities(stack, umsUsersState, warnings::put);
LOGGER.debug("Finished {}.", SYNC_CLOUD_IDENTITIES);
}
if (entitlementService.isEnvironmentPrivilegedUserEnabled(stack.getAccountId())) {
LOGGER.debug("Starting {} ...", ADD_SUDO_RULES);
try {
sudoRuleService.setupSudoRule(stack, freeIpaClient);
} catch (Exception e) {
warnings.put(stack.getEnvironmentCrn(), e.getMessage());
LOGGER.error("{} failed for environment '{}'.", ADD_SUDO_RULES, stack.getEnvironmentCrn(), e);
}
LOGGER.debug("Finished {}.", ADD_SUDO_RULES);
}
}
return toSyncStatusDetail(environmentCrn, warnings);
} catch (TimeoutException e) {
LOGGER.warn("Timed out while synchronizing environment {}", environmentCrn, e);
return SyncStatusDetail.fail(environmentCrn, "Timed out", warnings); |
432 | 0 | /** {@inheritDoc} */ | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} | NONSATD | true | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} |
432 | 1 | // TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable; | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} | DESIGN | true | this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} |
432 | 2 | // TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable; | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} | DESIGN | true | this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} | protected void addSubtable(GlyphSubtable subtable) {
if (subtable instanceof GlyphClassSubtable) {
this.gct = (GlyphClassSubtable) subtable;
} else if (subtable instanceof AttachmentPointSubtable) {
// TODO - not yet used
// this.apt = (AttachmentPointSubtable) subtable;
} else if (subtable instanceof LigatureCaretSubtable) {
// TODO - not yet used
// this.lct = (LigatureCaretSubtable) subtable;
} else if (subtable instanceof MarkAttachmentSubtable) {
this.mat = (MarkAttachmentSubtable) subtable;
} else {
throw new UnsupportedOperationException("unsupported glyph definition subtable type: " + subtable);
}
} |
33,203 | 0 | // TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator... | protected void processProperty(String beanName, BeanDefinition definition, PropertyDescriptor descriptor) throws BeansException {
Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
}
}
Reference reference = method.getAnnotation(Reference.class);
if (reference != null) {
if (reference.required()) {
// TODO use reference.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory reference: " + propertyName + " not specified on bean: " + beanName);
}
}
}
}
} | DESIGN | true | Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) { | protected void processProperty(String beanName, BeanDefinition definition, PropertyDescriptor descriptor) throws BeansException {
Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
} | protected void processProperty(String beanName, BeanDefinition definition, PropertyDescriptor descriptor) throws BeansException {
Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
}
}
Reference reference = method.getAnnotation(Reference.class);
if (reference != null) {
if (reference.required()) {
// TODO use reference.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory reference: " + propertyName + " not specified on bean: " + beanName);
} |
33,203 | 1 | // TODO use property.name()? | protected void processProperty(String beanName, BeanDefinition definition, PropertyDescriptor descriptor) throws BeansException {
Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
}
}
Reference reference = method.getAnnotation(Reference.class);
if (reference != null) {
if (reference.required()) {
// TODO use reference.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory reference: " + propertyName + " not specified on bean: " + beanName);
}
}
}
}
} | DESIGN | true | if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues(); | protected void processProperty(String beanName, BeanDefinition definition, PropertyDescriptor descriptor) throws BeansException {
Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
}
}
Reference reference = method.getAnnotation(Reference.class);
if (reference != null) {
if (reference.required()) { | protected void processProperty(String beanName, BeanDefinition definition, PropertyDescriptor descriptor) throws BeansException {
Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
}
}
Reference reference = method.getAnnotation(Reference.class);
if (reference != null) {
if (reference.required()) {
// TODO use reference.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory reference: " + propertyName + " not specified on bean: " + beanName);
}
}
}
}
} |
33,203 | 2 | // TODO use reference.name()? | protected void processProperty(String beanName, BeanDefinition definition, PropertyDescriptor descriptor) throws BeansException {
Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
}
}
Reference reference = method.getAnnotation(Reference.class);
if (reference != null) {
if (reference.required()) {
// TODO use reference.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory reference: " + propertyName + " not specified on bean: " + beanName);
}
}
}
}
} | DESIGN | true | if (reference != null) {
if (reference.required()) {
// TODO use reference.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues(); | String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
}
}
Reference reference = method.getAnnotation(Reference.class);
if (reference != null) {
if (reference.required()) {
// TODO use reference.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory reference: " + propertyName + " not specified on bean: " + beanName);
}
}
}
}
} | protected void processProperty(String beanName, BeanDefinition definition, PropertyDescriptor descriptor) throws BeansException {
Method method = descriptor.getWriteMethod();
if (method != null) {
// TODO should we handle the property.name() attribute?
// maybe add this to XBean code generator...
Property property = method.getAnnotation(Property.class);
if (property != null) {
if (property.required()) {
// TODO use property.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory property: " + propertyName + " not specified on bean: " + beanName);
}
}
}
Reference reference = method.getAnnotation(Reference.class);
if (reference != null) {
if (reference.required()) {
// TODO use reference.name()?
String propertyName = descriptor.getName();
MutablePropertyValues propertyValues = definition.getPropertyValues();
if (!propertyValues.contains(propertyName)) {
throw new BeanInitializationException("Mandatory reference: " + propertyName + " not specified on bean: " + beanName);
}
}
}
}
} |
25,017 | 0 | //get process id | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} | NONSATD | true | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0]; | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} |
25,017 | 1 | //pid@hostname | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} | NONSATD | true | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9 | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} |
25,017 | 2 | // TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid(); | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} | DESIGN | true | String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} | public static String obtainProcessID() {
//get process id
String pname = ManagementFactory.getRuntimeMXBean().getName(); //pid@hostname
String pid = pname.split("@")[0];
// TODO: change this as soon as we switch to a java version >= 9
// import java.lang.ProcessHandle;
// pid = ProcessHandle.current().pid();
return pid;
} |
25,018 | 0 | // may happen only due to internal programming error | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery; | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) { | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath()); |
25,018 | 1 | // the query is a contradiction, there are no matches | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
} | }
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null; | throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false; |
25,018 | 2 | // if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all | if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) { | if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i); |
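Editorial note: the records that follow (id 25,018, comment ids 3 through 8) all annotate the same method, which appears to be the buildQueryNoAggregations routine of an embedded query engine (the Infinispan-style Ickle engine, judging by the identifiers). Its overall shape is a three-way dispatch between a fully non-indexed query, a fully indexed (Lucene) query, and a hybrid of the two. A condensed, hypothetical sketch of that dispatch, with illustrative names that are not the dataset's actual types, is:

// Hypothetical, condensed sketch of the three execution strategies the records below annotate.
enum Strategy { NON_INDEXED, FULLY_INDEXED, HYBRID }

final class DispatchSketch {
   static Strategy choose(boolean cacheIsIndexed, boolean whereUsesOnlyIndexedFields) {
      if (!cacheIsIndexed) {
         // no index at all: scan the cache (the real method also takes this branch
         // when the filter is trivially true and there is nothing to sort or project)
         return Strategy.NON_INDEXED;
      }
      if (whereUsesOnlyIndexedFields) {
         // every field referenced by the filter is indexed: run it entirely on the index
         return Strategy.FULLY_INDEXED;
      }
      // only some fields are indexed: pre-filter on the index, then re-filter in memory
      return Strategy.HYBRID;
   }
}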
25,018 | 3 | // fully non-indexed execution because the filter matches everything or there is no indexing at all | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | // if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
} | }
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p); | PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
} |
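Editorial note: the record above classifies the comment marking the fall-back branch: when the cache has no index, or the normalized where clause is absent or trivially true and there is nothing to sort or project, the engine simply scans the cache. A standalone illustration of that guard, with hypothetical variable names mirroring the condition shown in the record, is:

// Standalone illustration of the fall-back guard (names are hypothetical).
// && binds tighter than ||, so the condition reads:
//   noIndex  OR  (trivialWhere AND noProjections AND noSortFields)
final class FallbackGuardSketch {
   static boolean useFullScan(boolean isIndexed,
                              boolean whereIsMissingOrTrue,
                              boolean hasProjections,
                              boolean hasSortFields) {
      return !isIndexed
            || whereIsMissingOrTrue && !hasProjections && !hasSortFields;
   }

   public static void main(String[] args) {
      // an un-indexed cache always falls back to a full scan
      System.out.println(useFullScan(false, false, true, true));   // true
      // an indexed cache with sorting keeps using the index even without a filter
      System.out.println(useFullScan(true, true, false, true));    // false
   }
}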
25,018 | 4 | // deduplicate sort fields | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) { | if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
} | boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way |
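Editorial note: the record above classifies the "// deduplicate sort fields" comment. The surrounding code keeps only the first sort field per property path while preserving declaration order, which is exactly what a LinkedHashMap keyed by the path provides. A self-contained sketch of the same idiom, using a hypothetical SortSpec type rather than the dataset's SortField, is:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Deduplicate sort criteria by path, keeping the first occurrence and the original order.
final class SortDedupSketch {
   record SortSpec(String path, boolean ascending) {}

   static List<SortSpec> deduplicate(List<SortSpec> specs) {
      Map<String, SortSpec> byPath = new LinkedHashMap<>();
      for (SortSpec s : specs) {
         byPath.putIfAbsent(s.path(), s);   // later duplicates are ignored
      }
      return new ArrayList<>(byPath.values());
   }

   public static void main(String[] args) {
      List<SortSpec> specs = List.of(
            new SortSpec("name", true),
            new SortSpec("age", false),
            new SortSpec("name", false));   // duplicate path, dropped
      deduplicate(specs).forEach(s -> System.out.println(s.path()));   // prints name, then age
   }
}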
25,018 | 5 | //todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored. | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | DESIGN | true | sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause); | String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) { | idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
} |
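Editorial note: the three "//todo [anistor]" lines in the record above are classified as design debt; they record that hybrid execution and full-text predicates do not mix well. The only related check the method already performs is the early rejection of full-text queries on a non-indexed cache. As a standalone sketch, with hypothetical names, that validation is simply:

// Standalone sketch of the early validation visible in the record above:
// a full-text predicate is only legal when the cache is indexed.
final class FullTextGuardSketch {
   static void requireIndexForFullText(boolean cacheIsIndexed, boolean queryUsesFullText) {
      if (queryUsesFullText && !cacheIsIndexed) {
         throw new IllegalStateException(
               "The cache must be indexed in order to use full-text queries.");
      }
   }
}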
25,018 | 6 | // identity comparison is intended here! | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) { | }
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length]; | if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes); |
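Editorial note: the record above covers the "// identity comparison is intended here!" remark. The expansion step returns the very same expression object when it could not push any further work onto the index, so reference equality (==) rather than equals() is the cheap way to detect that nothing changed. A minimal sketch of that idiom, with a hypothetical rewriter and expression type, is:

// Minimal sketch of the "rewriter returns the same instance when nothing changed" idiom.
final class IdentityCheckSketch {
   interface Expr {}
   record Leaf(String text) implements Expr {}

   // a rewriter that, by convention, returns its argument unchanged when no rewrite applies
   static Expr expand(Expr input) {
      return input; // pretend nothing was expandable
   }

   public static void main(String[] args) {
      Expr original = new Leaf("age > 18");
      Expr expanded = expand(original);
      if (expanded == original) { // identity comparison is intended here!
         System.out.println("no expansion happened, take the fully indexed path");
      } else {
         System.out.println("expression was rewritten, take the hybrid path");
      }
   }
}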
25,018 | 7 | // all involved fields are indexed, so go the Lucene way | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) { | }
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0; | // deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> { |
25,018 | 8 | // all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) { | }
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i]; | PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
} |
25,018 | 9 | // but some projections are duplicated ... | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length]; | BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++; | if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
} |
25,018 | 10 | // projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | }
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1); | }
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
} | for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} |
25,018 | 11 | // expansion leads to a full non-indexed query or the expansion is too long/complex | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | }
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
} | }
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | } else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} |
25,018 | 12 | // some fields are indexed, run a hybrid query | private BaseQuery buildQueryNoAggregations(QueryFactory queryFactory, String queryString, Map<String, Object> namedParameters,
long startOffset, int maxResults, IckleParsingResult<TypeMetadata> parsingResult) {
if (parsingResult.hasGroupingOrAggregations()) {
throw log.queryMustNotUseGroupingOrAggregation(); // may happen only due to internal programming error
}
boolean isFullTextQuery;
if (parsingResult.getWhereClause() != null) {
isFullTextQuery = parsingResult.getWhereClause().acceptVisitor(FullTextVisitor.INSTANCE);
if (!isIndexed && isFullTextQuery) {
throw new IllegalStateException("The cache must be indexed in order to use full-text queries.");
}
}
if (parsingResult.getSortFields() != null) {
for (SortField sortField : parsingResult.getSortFields()) {
PropertyPath<?> p = sortField.getPath();
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeUsedInOrderBy(p.toString());
}
}
}
if (parsingResult.getProjectedPaths() != null) {
for (PropertyPath<?> p : parsingResult.getProjectedPaths()) {
if (propertyHelper.isRepeatedProperty(parsingResult.getTargetEntityMetadata(), p.asArrayPath())) {
throw log.multivaluedPropertyCannotBeProjected(p.asStringPath());
}
}
}
BooleanExpr normalizedWhereClause = booleanFilterNormalizer.normalize(parsingResult.getWhereClause());
if (normalizedWhereClause == ConstantBooleanExpr.FALSE) {
// the query is a contradiction, there are no matches
return new EmptyResultQuery(queryFactory, cache, queryString, namedParameters, startOffset, maxResults);
}
// if cache is indexed but there is no actual 'where' filter clause and we do have sorting or projections we should still use the index, otherwise just go for a non-indexed fetch-all
if (!isIndexed || (normalizedWhereClause == null || normalizedWhereClause == ConstantBooleanExpr.TRUE) && parsingResult.getProjections() == null && parsingResult.getSortFields() == null) {
// fully non-indexed execution because the filter matches everything or there is no indexing at all
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
IndexedFieldProvider.FieldIndexingMetadata fieldIndexingMetadata = propertyHelper.getIndexedFieldProvider().get(parsingResult.getTargetEntityMetadata());
boolean allProjectionsAreStored = true;
LinkedHashMap<PropertyPath, List<Integer>> projectionsMap = null;
if (parsingResult.getProjectedPaths() != null) {
projectionsMap = new LinkedHashMap<>();
for (int i = 0; i < parsingResult.getProjectedPaths().length; i++) {
PropertyPath<?> p = parsingResult.getProjectedPaths()[i];
List<Integer> idx = projectionsMap.get(p);
if (idx == null) {
idx = new ArrayList<>();
projectionsMap.put(p, idx);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allProjectionsAreStored = false;
}
}
idx.add(i);
}
}
boolean allSortFieldsAreStored = true;
SortField[] sortFields = parsingResult.getSortFields();
if (sortFields != null) {
// deduplicate sort fields
LinkedHashMap<String, SortField> sortFieldMap = new LinkedHashMap<>();
for (SortField sf : sortFields) {
PropertyPath<?> p = sf.getPath();
String asStringPath = p.asStringPath();
if (!sortFieldMap.containsKey(asStringPath)) {
sortFieldMap.put(asStringPath, sf);
if (!fieldIndexingMetadata.isStored(p.asArrayPath())) {
allSortFieldsAreStored = false;
}
}
}
sortFields = sortFieldMap.values().toArray(new SortField[sortFieldMap.size()]);
}
//todo [anistor] do not allow hybrid queries with fulltext. exception, allow a fully indexed query followed by in-memory aggregation. the aggregated or 'having' field should not be analyzed
//todo [anistor] do we allow aggregation in fulltext queries?
//todo [anistor] do not allow hybrid fulltext queries. all 'where' fields must be indexed. all projections must be stored.
BooleShannonExpansion bse = new BooleShannonExpansion(MAX_EXPANSION_COFACTORS, fieldIndexingMetadata);
BooleanExpr expansion = bse.expand(normalizedWhereClause);
if (expansion == normalizedWhereClause) { // identity comparison is intended here!
// all involved fields are indexed, so go the Lucene way
if (allSortFieldsAreStored) {
if (allProjectionsAreStored) {
// all projections are stored, so we can execute the query entirely against the index, and we can also sort using the index
RowProcessor rowProcessor = null;
if (parsingResult.getProjectedPaths() != null) {
if (projectionsMap.size() != parsingResult.getProjectedPaths().length) {
// but some projections are duplicated ...
final Class<?>[] projectedTypes = new Class<?>[projectionsMap.size()];
final int[] map = new int[parsingResult.getProjectedPaths().length];
int j = 0;
for (List<Integer> idx : projectionsMap.values()) {
int i = idx.get(0);
projectedTypes[j] = parsingResult.getProjectedTypes()[i];
for (int k : idx) {
map[k] = j;
}
j++;
}
RowProcessor projectionProcessor = makeProjectionProcessor(projectedTypes);
rowProcessor = inRow -> {
if (projectionProcessor != null) {
inRow = projectionProcessor.process(inRow);
}
Object[] outRow = new Object[map.length];
for (int i = 0; i < map.length; i++) {
outRow[i] = inRow[map[i]];
}
return outRow;
};
PropertyPath[] deduplicatedProjection = projectionsMap.keySet().toArray(new PropertyPath[projectionsMap.size()]);
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, deduplicatedProjection, projectedTypes, sortFields);
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
rowProcessor = makeProjectionProcessor(parsingResult.getProjectedTypes());
}
}
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | NONSATD | true | return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1); | IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} | }
return new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, parsingResult, parsingResult.getProjections(), makeResultProcessor(rowProcessor), startOffset, maxResults);
} else {
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, sortFields);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), startOffset, maxResults);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, null);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), -1, -1, indexQuery);
}
} else {
// projections may be stored but some sort fields are not so we need to query the index and then execute in-memory sorting and projecting in a second phase
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, normalizedWhereClause, null, null, null);
Query indexQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
String projectionQueryStr = SyntaxTreePrinter.printTree(parsingResult.getTargetEntityName(), parsingResult.getProjectedPaths(), null, sortFields);
return new HybridQuery(queryFactory, cache, projectionQueryStr, null, getObjectFilter(matcher, projectionQueryStr, null, null), startOffset, maxResults, indexQuery);
}
}
if (expansion == ConstantBooleanExpr.TRUE) {
// expansion leads to a full non-indexed query or the expansion is too long/complex
return new EmbeddedQuery(this, queryFactory, cache, queryString, namedParameters, parsingResult.getProjections(), startOffset, maxResults);
}
// some fields are indexed, run a hybrid query
IckleParsingResult<TypeMetadata> fpr = makeFilterParsingResult(parsingResult, expansion, null, null, null);
Query expandedQuery = new EmbeddedLuceneQuery<>(this, queryFactory, namedParameters, fpr, null, makeResultProcessor(null), -1, -1);
return new HybridQuery(queryFactory, cache, queryString, namedParameters, getObjectFilter(matcher, queryString, namedParameters, null), startOffset, maxResults, expandedQuery);
} |
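The row above deduplicates repeated projection paths before querying the index and then rebuilds each result row through an index map (the projectionsMap / map[] pair feeding the rowProcessor lambda). A minimal self-contained sketch of just that remapping step is shown below; the class and variable names are hypothetical and no Infinispan APIs are used, so this is an illustration of the idea rather than the library's implementation.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Sketch: query the index once per distinct projected path, then expand each
// returned row back into the order (and multiplicity) the caller asked for.
public class ProjectionRemapDemo {

    public static void main(String[] args) {
        String[] requestedPaths = {"name", "age", "name"};   // "name" is requested twice

        // Deduplicate while remembering, for every requested position, which
        // distinct column it maps to (same idea as the projectionsMap/map[] pair above).
        Map<String, Integer> distinct = new LinkedHashMap<>();
        int[] map = new int[requestedPaths.length];
        for (int k = 0; k < requestedPaths.length; k++) {
            map[k] = distinct.computeIfAbsent(requestedPaths[k], p -> distinct.size());
        }
        List<String> deduplicated = new ArrayList<>(distinct.keySet());

        // Pretend this row came back from the index with only the distinct columns.
        Object[] inRow = {"Alice", 42};

        // Expand it back into the shape the caller expects.
        Object[] outRow = new Object[map.length];
        for (int i = 0; i < map.length; i++) {
            outRow[i] = inRow[map[i]];
        }

        System.out.println("distinct columns: " + deduplicated);                      // [name, age]
        System.out.println("expanded row: " + java.util.Arrays.toString(outRow));     // [Alice, 42, Alice]
    }
}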
8,635 | 0 | //TODO: collection.toString() will likely not produce any useful output! | private void tryConnectToIPv6() throws Exception {
Collection<Inet6Address> possibleInetAddresses = AddressUtil
.getPossibleInetAddressesFor((Inet6Address) address.getInetAddress());
Level level = silent ? Level.FINEST : Level.INFO;
//TODO: collection.toString() will likely not produce any useful output!
if (logger.isLoggable(level)) {
logger.log(level, "Trying to connect possible IPv6 addresses: " + possibleInetAddresses);
}
boolean connected = false;
Exception error = null;
int configuredTimeoutMillis =
ioService.getSocketConnectTimeoutSeconds(endpointManager.getEndpointQualifier()) * MILLIS_PER_SECOND;
int timeoutMillis = configuredTimeoutMillis > 0 && configuredTimeoutMillis < Integer.MAX_VALUE
? configuredTimeoutMillis : DEFAULT_IPV6_SOCKET_CONNECT_TIMEOUT_SECONDS * MILLIS_PER_SECOND;
for (Inet6Address inetAddress : possibleInetAddresses) {
try {
tryToConnect(new InetSocketAddress(inetAddress, address.getPort()), timeoutMillis);
connected = true;
break;
} catch (Exception e) {
error = e;
}
}
if (!connected && error != null) {
// could not connect any of addresses
throw error;
}
} | DEFECT | true | .getPossibleInetAddressesFor((Inet6Address) address.getInetAddress());
Level level = silent ? Level.FINEST : Level.INFO;
//TODO: collection.toString() will likely not produce any useful output!
if (logger.isLoggable(level)) {
logger.log(level, "Trying to connect possible IPv6 addresses: " + possibleInetAddresses); | private void tryConnectToIPv6() throws Exception {
Collection<Inet6Address> possibleInetAddresses = AddressUtil
.getPossibleInetAddressesFor((Inet6Address) address.getInetAddress());
Level level = silent ? Level.FINEST : Level.INFO;
//TODO: collection.toString() will likely not produce any useful output!
if (logger.isLoggable(level)) {
logger.log(level, "Trying to connect possible IPv6 addresses: " + possibleInetAddresses);
}
boolean connected = false;
Exception error = null;
int configuredTimeoutMillis =
ioService.getSocketConnectTimeoutSeconds(endpointManager.getEndpointQualifier()) * MILLIS_PER_SECOND;
int timeoutMillis = configuredTimeoutMillis > 0 && configuredTimeoutMillis < Integer.MAX_VALUE
? configuredTimeoutMillis : DEFAULT_IPV6_SOCKET_CONNECT_TIMEOUT_SECONDS * MILLIS_PER_SECOND;
for (Inet6Address inetAddress : possibleInetAddresses) { | private void tryConnectToIPv6() throws Exception {
Collection<Inet6Address> possibleInetAddresses = AddressUtil
.getPossibleInetAddressesFor((Inet6Address) address.getInetAddress());
Level level = silent ? Level.FINEST : Level.INFO;
//TODO: collection.toString() will likely not produce any useful output!
if (logger.isLoggable(level)) {
logger.log(level, "Trying to connect possible IPv6 addresses: " + possibleInetAddresses);
}
boolean connected = false;
Exception error = null;
int configuredTimeoutMillis =
ioService.getSocketConnectTimeoutSeconds(endpointManager.getEndpointQualifier()) * MILLIS_PER_SECOND;
int timeoutMillis = configuredTimeoutMillis > 0 && configuredTimeoutMillis < Integer.MAX_VALUE
? configuredTimeoutMillis : DEFAULT_IPV6_SOCKET_CONNECT_TIMEOUT_SECONDS * MILLIS_PER_SECOND;
for (Inet6Address inetAddress : possibleInetAddresses) {
try {
tryToConnect(new InetSocketAddress(inetAddress, address.getPort()), timeoutMillis);
connected = true;
break;
} catch (Exception e) {
error = e;
}
}
if (!connected && error != null) {
// could not connect any of addresses |
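The TODO in the row above worries that logging the address collection directly may not produce useful output. One way that concern could be addressed is to format the candidate addresses explicitly before logging; the sketch below does that with plain JDK calls. The class and method names are illustrative only and this is not Hazelcast code.

import java.net.InetAddress;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

// Sketch: render candidate addresses explicitly instead of relying on
// Collection.toString() when building the log message.
public class AddressLogFormatDemo {

    static String describe(Collection<? extends InetAddress> addresses) {
        return addresses.stream()
                .map(InetAddress::getHostAddress)
                .collect(Collectors.joining(", ", "[", "]"));
    }

    public static void main(String[] args) throws Exception {
        // Literal IPv6 addresses, so no DNS lookup is needed here.
        List<InetAddress> candidates = List.of(
                InetAddress.getByName("::1"),
                InetAddress.getByName("fe80::1"));
        System.out.println("Trying to connect possible IPv6 addresses: " + describe(candidates));
    }
}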
8,635 | 1 | // could not connect any of addresses | private void tryConnectToIPv6() throws Exception {
Collection<Inet6Address> possibleInetAddresses = AddressUtil
.getPossibleInetAddressesFor((Inet6Address) address.getInetAddress());
Level level = silent ? Level.FINEST : Level.INFO;
//TODO: collection.toString() will likely not produce any useful output!
if (logger.isLoggable(level)) {
logger.log(level, "Trying to connect possible IPv6 addresses: " + possibleInetAddresses);
}
boolean connected = false;
Exception error = null;
int configuredTimeoutMillis =
ioService.getSocketConnectTimeoutSeconds(endpointManager.getEndpointQualifier()) * MILLIS_PER_SECOND;
int timeoutMillis = configuredTimeoutMillis > 0 && configuredTimeoutMillis < Integer.MAX_VALUE
? configuredTimeoutMillis : DEFAULT_IPV6_SOCKET_CONNECT_TIMEOUT_SECONDS * MILLIS_PER_SECOND;
for (Inet6Address inetAddress : possibleInetAddresses) {
try {
tryToConnect(new InetSocketAddress(inetAddress, address.getPort()), timeoutMillis);
connected = true;
break;
} catch (Exception e) {
error = e;
}
}
if (!connected && error != null) {
// could not connect any of addresses
throw error;
}
} | NONSATD | true | }
if (!connected && error != null) {
// could not connect any of addresses
throw error;
} | for (Inet6Address inetAddress : possibleInetAddresses) {
try {
tryToConnect(new InetSocketAddress(inetAddress, address.getPort()), timeoutMillis);
connected = true;
break;
} catch (Exception e) {
error = e;
}
}
if (!connected && error != null) {
// could not connect any of addresses
throw error;
}
} | //TODO: collection.toString() will likely not produce any useful output!
if (logger.isLoggable(level)) {
logger.log(level, "Trying to connect possible IPv6 addresses: " + possibleInetAddresses);
}
boolean connected = false;
Exception error = null;
int configuredTimeoutMillis =
ioService.getSocketConnectTimeoutSeconds(endpointManager.getEndpointQualifier()) * MILLIS_PER_SECOND;
int timeoutMillis = configuredTimeoutMillis > 0 && configuredTimeoutMillis < Integer.MAX_VALUE
? configuredTimeoutMillis : DEFAULT_IPV6_SOCKET_CONNECT_TIMEOUT_SECONDS * MILLIS_PER_SECOND;
for (Inet6Address inetAddress : possibleInetAddresses) {
try {
tryToConnect(new InetSocketAddress(inetAddress, address.getPort()), timeoutMillis);
connected = true;
break;
} catch (Exception e) {
error = e;
}
}
if (!connected && error != null) {
// could not connect any of addresses
throw error;
}
} |
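The connect loop in the rows above follows a common shape: try each candidate in order, keep only the most recent failure, and rethrow it if nothing succeeded. The following self-contained sketch shows that pattern in isolation, under the assumption that a success simply ends the loop; the names are hypothetical and this is not the Hazelcast implementation.

import java.util.List;
import java.util.function.Consumer;

// Sketch: first success wins; the last failure is rethrown only if every
// candidate failed, mirroring the "could not connect any of addresses" branch.
public class FirstSuccessDemo {

    static <T> void tryEach(List<T> candidates, Consumer<T> attempt) throws Exception {
        Exception error = null;
        for (T candidate : candidates) {
            try {
                attempt.accept(candidate);
                return;                    // first success wins
            } catch (Exception e) {
                error = e;                 // remember only the most recent failure
            }
        }
        if (error != null) {
            throw error;                   // could not succeed with any candidate
        }
    }

    public static void main(String[] args) throws Exception {
        tryEach(List.of("a", "b", "c"), s -> {
            if (!"b".equals(s)) {
                throw new IllegalStateException("cannot use " + s);
            }
            System.out.println("connected via " + s);
        });
    }
}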