file_id
int64 1
215k
| content
stringlengths 7
454k
| repo
stringlengths 6
113
| path
stringlengths 6
251
|
---|---|---|---|
1,410 | /*
*
* Apache License
* Version 2.0, January 2004
* http://www.apache.org/licenses/
*
* TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
*
* 1. Definitions.
*
* "License" shall mean the terms and conditions for use, reproduction,
* and distribution as defined by Sections 1 through 9 of this document.
*
* "Licensor" shall mean the copyright owner or entity authorized by
* the copyright owner that is granting the License.
*
* "Legal Entity" shall mean the union of the acting entity and all
* other entities that control, are controlled by, or are under common
* control with that entity. For the purposes of this definition,
* "control" means (i) the power, direct or indirect, to cause the
* direction or management of such entity, whether by contract or
* otherwise, or (ii) ownership of fifty percent (50%) or more of the
* outstanding shares, or (iii) beneficial ownership of such entity.
*
* "You" (or "Your") shall mean an individual or Legal Entity
* exercising permissions granted by this License.
*
* "Source" form shall mean the preferred form for making modifications,
* including but not limited to software source code, documentation
* source, and configuration files.
*
* "Object" form shall mean any form resulting from mechanical
* transformation or translation of a Source form, including but
* not limited to compiled object code, generated documentation,
* and conversions to other media types.
*
* "Work" shall mean the work of authorship, whether in Source or
* Object form, made available under the License, as indicated by a
* copyright notice that is included in or attached to the work
* (an example is provided in the Appendix below).
*
* "Derivative Works" shall mean any work, whether in Source or Object
* form, that is based on (or derived from) the Work and for which the
* editorial revisions, annotations, elaborations, or other modifications
* represent, as a whole, an original work of authorship. For the purposes
* of this License, Derivative Works shall not include works that remain
* separable from, or merely link (or bind by name) to the interfaces of,
* the Work and Derivative Works thereof.
*
* "Contribution" shall mean any work of authorship, including
* the original version of the Work and any modifications or additions
* to that Work or Derivative Works thereof, that is intentionally
* submitted to Licensor for inclusion in the Work by the copyright owner
* or by an individual or Legal Entity authorized to submit on behalf of
* the copyright owner. For the purposes of this definition, "submitted"
* means any form of electronic, verbal, or written communication sent
* to the Licensor or its representatives, including but not limited to
* communication on electronic mailing lists, source code control systems,
* and issue tracking systems that are managed by, or on behalf of, the
* Licensor for the purpose of discussing and improving the Work, but
* excluding communication that is conspicuously marked or otherwise
* designated in writing by the copyright owner as "Not a Contribution."
*
* "Contributor" shall mean Licensor and any individual or Legal Entity
* on behalf of whom a Contribution has been received by Licensor and
* subsequently incorporated within the Work.
*
* 2. Grant of Copyright License. Subject to the terms and conditions of
* this License, each Contributor hereby grants to You a perpetual,
* worldwide, non-exclusive, no-charge, royalty-free, irrevocable
* copyright license to reproduce, prepare Derivative Works of,
* publicly display, publicly perform, sublicense, and distribute the
* Work and such Derivative Works in Source or Object form.
*
* 3. Grant of Patent License. Subject to the terms and conditions of
* this License, each Contributor hereby grants to You a perpetual,
* worldwide, non-exclusive, no-charge, royalty-free, irrevocable
* (except as stated in this section) patent license to make, have made,
* use, offer to sell, sell, import, and otherwise transfer the Work,
* where such license applies only to those patent claims licensable
* by such Contributor that are necessarily infringed by their
* Contribution(s) alone or by combination of their Contribution(s)
* with the Work to which such Contribution(s) was submitted. If You
* institute patent litigation against any entity (including a
* cross-claim or counterclaim in a lawsuit) alleging that the Work
* or a Contribution incorporated within the Work constitutes direct
* or contributory patent infringement, then any patent licenses
* granted to You under this License for that Work shall terminate
* as of the date such litigation is filed.
*
* 4. Redistribution. You may reproduce and distribute copies of the
* Work or Derivative Works thereof in any medium, with or without
* modifications, and in Source or Object form, provided that You
* meet the following conditions:
*
* (a) You must give any other recipients of the Work or
* Derivative Works a copy of this License; and
*
* (b) You must cause any modified files to carry prominent notices
* stating that You changed the files; and
*
* (c) You must retain, in the Source form of any Derivative Works
* that You distribute, all copyright, patent, trademark, and
* attribution notices from the Source form of the Work,
* excluding those notices that do not pertain to any part of
* the Derivative Works; and
*
* (d) If the Work includes a "NOTICE" text file as part of its
* distribution, then any Derivative Works that You distribute must
* include a readable copy of the attribution notices contained
* within such NOTICE file, excluding those notices that do not
* pertain to any part of the Derivative Works, in at least one
* of the following places: within a NOTICE text file distributed
* as part of the Derivative Works; within the Source form or
* documentation, if provided along with the Derivative Works; or,
* within a display generated by the Derivative Works, if and
* wherever such third-party notices normally appear. The contents
* of the NOTICE file are for informational purposes only and
* do not modify the License. You may add Your own attribution
* notices within Derivative Works that You distribute, alongside
* or as an addendum to the NOTICE text from the Work, provided
* that such additional attribution notices cannot be construed
* as modifying the License.
*
* You may add Your own copyright statement to Your modifications and
* may provide additional or different license terms and conditions
* for use, reproduction, or distribution of Your modifications, or
* for any such Derivative Works as a whole, provided Your use,
* reproduction, and distribution of the Work otherwise complies with
* the conditions stated in this License.
*
* 5. Submission of Contributions. Unless You explicitly state otherwise,
* any Contribution intentionally submitted for inclusion in the Work
* by You to the Licensor shall be under the terms and conditions of
* this License, without any additional terms or conditions.
* Notwithstanding the above, nothing herein shall supersede or modify
* the terms of any separate license agreement you may have executed
* with Licensor regarding such Contributions.
*
* 6. Trademarks. This License does not grant permission to use the trade
* names, trademarks, service marks, or product names of the Licensor,
* except as required for reasonable and customary use in describing the
* origin of the Work and reproducing the content of the NOTICE file.
*
* 7. Disclaimer of Warranty. Unless required by applicable law or
* agreed to in writing, Licensor provides the Work (and each
* Contributor provides its Contributions) on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied, including, without limitation, any warranties or conditions
* of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
* PARTICULAR PURPOSE. You are solely responsible for determining the
* appropriateness of using or redistributing the Work and assume any
* risks associated with Your exercise of permissions under this License.
*
* 8. Limitation of Liability. In no event and under no legal theory,
* whether in tort (including negligence), contract, or otherwise,
* unless required by applicable law (such as deliberate and grossly
* negligent acts) or agreed to in writing, shall any Contributor be
* liable to You for damages, including any direct, indirect, special,
* incidental, or consequential damages of any character arising as a
* result of this License or out of the use or inability to use the
* Work (including but not limited to damages for loss of goodwill,
* work stoppage, computer failure or malfunction, or any and all
* other commercial damages or losses), even if such Contributor
* has been advised of the possibility of such damages.
*
* 9. Accepting Warranty or Additional Liability. While redistributing
* the Work or Derivative Works thereof, You may choose to offer,
* and charge a fee for, acceptance of support, warranty, indemnity,
* or other liability obligations and/or rights consistent with this
* License. However, in accepting such obligations, You may act only
* on Your own behalf and on Your sole responsibility, not on behalf
* of any other Contributor, and only if You agree to indemnify,
* defend, and hold each Contributor harmless for any liability
* incurred by, or claims asserted against, such Contributor by reason
* of your accepting any such warranty or additional liability.
*
* END OF TERMS AND CONDITIONS
*
* APPENDIX: How to apply the Apache License to your work.
*
* To apply the Apache License to your work, attach the following
* boilerplate notice, with the fields enclosed by brackets "[]"
* replaced with your own identifying information. (Don't include
* the brackets!) The text should be enclosed in the appropriate
* comment syntax for the file format. We also recommend that a
* file or class name and description of purpose be included on the
* same "printed page" as the copyright notice for easier
* identification within third-party archives.
*
* Copyright 2016 Alibaba Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*
*/
package android.taobao.atlas.hack;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import android.taobao.atlas.hack.Hack.HackDeclaration.HackAssertionException;
import android.taobao.atlas.hack.Interception.InterceptionHandler;
import android.util.Log;
/** @author Oasis */
public class Hack {
/** All hacks should be declared in a centralized point extending this class, typically as static
* method, and call it in your application initialization stage to verify all the hack
* assertions by catching exception thrown:
* <pre>
* class MyHacks extends HackDeclaration {
*
* static HackedField<Object, PackageManager> ContextImpl_mPackageManager;
* static HackedField<Object, Map<String, IBinder>> ServiceManager_sCache;
*
* static void defineAndVerify() {
* try {
* ContextImpl_mPackageManager = Hack.into("android.app.ContextImpl").field("mPackageManager").ofType(PackageManager.class);
* ServiceManager_sCache = Hack.into("android.os.ServiceManager").staticField("sCache").ofGenericType(Map.class)
*
* ...
* } catch (HackAssertionException e) {
* // Report the failure and activate fall-back strategy.
* ...
* }
* }
* }
* <pre>
* Thus we can verify them all together in an early application initialization stage. If any assertion
* failed, a fall-back strategy is suggested. */
public static abstract class HackDeclaration {

    /**
     * Signals that a hack assertion (class/field/method lookup or modifier check) failed.
     *
     * <p>Deliberately extends {@link Throwable} rather than {@code Exception} so that broad
     * {@code catch (Exception)} clauses outside the centralized hack declarations cannot
     * swallow it by accident.</p>
     */
    public static class HackAssertionException extends Throwable {

        private static final long serialVersionUID = 1L;

        // Context of the failed assertion, populated by the Hacked* constructors when known.
        private Class<?> mHackedClass;
        private String mHackedFieldName;
        private String mHackedMethodName;

        public HackAssertionException(final String e) {
            super(e);
        }

        public HackAssertionException(final Exception e) {
            super(e);
        }

        @Override
        public String toString() {
            final Throwable cause = getCause();
            if (cause == null) {
                return super.toString();
            }
            // Prefer the underlying cause so logs show the real reflective failure.
            return getClass().getName() + ": " + cause;
        }

        /** @return the class being hacked when the assertion failed, or null if unknown. */
        public Class<?> getHackedClass() {
            return mHackedClass;
        }

        public void setHackedClass(Class<?> mHackedClass) {
            this.mHackedClass = mHackedClass;
        }

        /** @return the method name being resolved when the assertion failed, or null. */
        public String getHackedMethodName() {
            return mHackedMethodName;
        }

        public void setHackedMethodName(String methodName) {
            this.mHackedMethodName = methodName;
        }

        /** @return the field name being resolved when the assertion failed, or null. */
        public String getHackedFieldName() {
            return mHackedFieldName;
        }

        public void setHackedFieldName(String fieldName) {
            this.mHackedFieldName = fieldName;
        }
    }
}
/**
 * Callback for handling hack assertion failures. Register the global handler via
 * {@link Hack#setAssertionFailureHandler(AssertionFailureHandler) }.
 */
public interface AssertionFailureHandler {
/**
 * Invoked when a hack assertion fails.
 *
 * @param failure the assertion failure being reported
 * @return {@code true} if the failure was handled and need not be thrown;
 *         {@code false} to let {@link Hack} throw it to the caller
 */
boolean onAssertionFailure(HackAssertionException failure);
}
/** @beta */
public static class HackedField<C, T> {

    private final Field mField;

    /** @param modifiers the modifiers this field must have (pass 0 to skip the check) */
    HackedField(final Class<C> clazz, final String name, int modifiers) throws HackAssertionException {
        Field resolved = null;
        try {
            if (clazz == null) return;
            resolved = clazz.getDeclaredField(name);
            if (modifiers > 0 && (resolved.getModifiers() & modifiers) != modifiers) {
                fail(new HackAssertionException(resolved + " does not match modifiers: " + modifiers));
            }
            resolved.setAccessible(true);
        } catch (final NoSuchFieldException e) {
            final HackAssertionException failure = new HackAssertionException(e);
            failure.setHackedClass(clazz);
            failure.setHackedFieldName(name);
            fail(failure);
        } finally {
            // Assigned in finally so the final field is initialized on every path,
            // including the early return and a failure swallowed by the global handler.
            mField = resolved;
        }
    }

    /** Assert that the field's declared type is assignment-compatible with {@code type}. */
    @SuppressWarnings("unchecked") // TODO: Add more check
    public <T2> HackedField<C, T2> ofGenericType(final Class<?> type) throws HackAssertionException {
        if (mField != null && !type.isAssignableFrom(mField.getType())) {
            fail(new HackAssertionException(new ClassCastException(mField + " is not of type " + type)));
        }
        return (HackedField<C, T2>) this;
    }

    /** Assert that the field's declared type is assignment-compatible with {@code type}. */
    @SuppressWarnings("unchecked")
    public <T2> HackedField<C, T2> ofType(final Class<T2> type) throws HackAssertionException {
        if (mField != null && !type.isAssignableFrom(mField.getType())) {
            fail(new HackAssertionException(new ClassCastException(mField + " is not of type " + type)));
        }
        return (HackedField<C, T2>) this;
    }

    /** Assert the field type by class name; reports a failure if the class cannot be loaded. */
    @SuppressWarnings("unchecked")
    public HackedField<C, T> ofType(final String type_name) throws HackAssertionException {
        try {
            return (HackedField<C, T>) ofType(Class.forName(type_name));
        } catch (final ClassNotFoundException e) {
            fail(new HackAssertionException(e));
            return this;
        }
    }

    /** Get current value of this field (pass null for a static field). */
    public T get(final C instance) {
        try {
            @SuppressWarnings("unchecked") final T value = (T) mField.get(instance);
            return value;
        } catch (final IllegalAccessException e) {
            // Should never happen: setAccessible(true) was applied at lookup time.
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Set value of this field
     *
     * <p>No type enforced here since most type mismatch can be easily tested and exposed early.</p>
     */
    public void set(final C instance, final Object value) {
        try {
            mField.set(instance, value);
        } catch (final IllegalAccessException e) {
            // Should never happen: setAccessible(true) was applied at lookup time.
            e.printStackTrace();
        }
    }

    /**
     * Hijack the current instance of this field.
     *
     * <p><b>The instance must not be null at the time of hijacking</b>, or an
     * IllegalStateException will be thrown.
     *
     * @param handler a invocation handler to implement the hijack logic.
     */
    public void hijack(final C instance, final InterceptionHandler<?> handler) {
        final Object target = get(instance);
        if (target == null) throw new IllegalStateException("Cannot hijack null");
        final Class<?>[] interfaces = target.getClass().getInterfaces();
        set(instance, Interception.proxy(target, handler, interfaces));
    }

    public Field getField() {
        return mField;
    }
}
public static class HackedMethod {

    private static final String TAG = "HackedMethod";

    protected final Method mMethod;

    HackedMethod(final Class<?> clazz, final String name, final Class<?>[] arg_types, int modifiers) throws HackAssertionException {
        Method resolved = null;
        try {
            if (clazz == null) return;
            resolved = clazz.getDeclaredMethod(name, arg_types);
            if (modifiers > 0 && (resolved.getModifiers() & modifiers) != modifiers) {
                fail(new HackAssertionException(resolved + " does not match modifiers: " + modifiers));
            }
            resolved.setAccessible(true);
        } catch (final NoSuchMethodException e) {
            Log.e(TAG, "No such method: " + e.getMessage());
            final HackAssertionException failure = new HackAssertionException(e);
            failure.setHackedClass(clazz);
            failure.setHackedMethodName(name);
            fail(failure);
        } finally {
            // Assigned in finally so the final field is initialized on every path,
            // including the early return and a failure swallowed by the global handler.
            mMethod = resolved;
        }
    }

    /**
     * Invoke the hacked method reflectively.
     *
     * @param receiver the instance to invoke on, or null for a static method
     * @return the method's return value, or null if access unexpectedly failed
     * @throws InvocationTargetException if the target method itself threw
     */
    public Object invoke(final Object receiver, final Object... args) throws IllegalArgumentException, InvocationTargetException {
        try {
            return mMethod.invoke(receiver, args);
        } catch (final IllegalAccessException e) {
            // Should never happen: setAccessible(true) was applied at lookup time.
            e.printStackTrace();
            return null;
        }
    }

    public Method getMethod() {
        return mMethod;
    }
}
public static class HackedConstructor {

    protected Constructor<?> mConstructor;

    HackedConstructor(final Class<?> clazz, final Class<?>[] arg_types) throws HackAssertionException {
        try {
            if (clazz == null) return;
            mConstructor = clazz.getDeclaredConstructor(arg_types);
        } catch (NoSuchMethodException e) {
            final HackAssertionException failure = new HackAssertionException(e);
            failure.setHackedClass(clazz);
            fail(failure);
        }
    }

    /**
     * Instantiate via the hacked constructor.
     *
     * @return the new instance, or null if construction failed (the failure is printed).
     */
    public Object getInstance(final Object... arg_types) throws IllegalArgumentException {
        mConstructor.setAccessible(true);
        try {
            return mConstructor.newInstance(arg_types);
        } catch (Exception e) {
            // Best-effort: callers treat a null return as "could not instantiate".
            e.printStackTrace();
            return null;
        }
    }
}
/** Entry point of the fluent API: wraps a target class and vends hacked members. */
public static class HackedClass<C> {

    protected Class<C> mClass;

    public HackedClass(final Class<C> clazz) {
        mClass = clazz;
    }

    /** Resolve a static field named {@code name}, asserting the STATIC modifier. */
    public HackedField<C, Object> staticField(final String name) throws HackAssertionException {
        return new HackedField<C, Object>(mClass, name, Modifier.STATIC);
    }

    /** Resolve an instance field named {@code name} (no modifier check). */
    public HackedField<C, Object> field(final String name) throws HackAssertionException {
        return new HackedField<C, Object>(mClass, name, 0);
    }

    /** Resolve a static method, asserting the STATIC modifier. */
    public HackedMethod staticMethod(final String name, final Class<?>... arg_types) throws HackAssertionException {
        return new HackedMethod(mClass, name, arg_types, Modifier.STATIC);
    }

    /** Resolve an instance method (no modifier check). */
    public HackedMethod method(final String name, final Class<?>... arg_types) throws HackAssertionException {
        return new HackedMethod(mClass, name, arg_types, 0);
    }

    /** Resolve a declared constructor with the given parameter types. */
    public HackedConstructor constructor(final Class<?>... arg_types) throws HackAssertionException {
        return new HackedConstructor(mClass, arg_types);
    }

    public Class<C> getmClass() {
        return mClass;
    }
}
/** Begin a hack on an already-loaded class. */
public static <T> HackedClass<T> into(final Class<T> clazz) {
    return new HackedClass<T>(clazz);
}

/**
 * Begin a hack on the class named {@code class_name}.
 *
 * <p>If the class cannot be found, the failure is reported via the global
 * {@link AssertionFailureHandler} (or thrown when unhandled), and a {@code HackedClass}
 * wrapping {@code null} is returned so chained member lookups degrade to no-ops.</p>
 *
 * @throws HackAssertionException if the class is missing and no handler swallows the failure
 */
@SuppressWarnings("unchecked")
public static <T> HackedClass<T> into(final String class_name) throws HackAssertionException {
    try {
        // Construct with the proper type parameter instead of a raw HackedClass.
        return new HackedClass<T>((Class<T>) Class.forName(class_name));
    } catch (final ClassNotFoundException e) {
        fail(new HackAssertionException(e));
        return new HackedClass<T>(null); // Degraded placeholder: subsequent lookups no-op.
    }
}
/** Route {@code e} to the global handler; throw it when no handler claims it. */
private static void fail(HackAssertionException e) throws HackAssertionException {
    final boolean handled = sFailureHandler != null && sFailureHandler.onAssertionFailure(e);
    if (!handled) {
        throw e;
    }
}

/** Specify a handler to deal with assertion failure, and decide whether the failure should be thrown. */
public static void setAssertionFailureHandler(AssertionFailureHandler handler) {
    sFailureHandler = handler;
}

// Static utility holder: not instantiable.
private Hack() {}

private static AssertionFailureHandler sFailureHandler;
}
| alibaba/atlas | atlas-core/src/main/java/android/taobao/atlas/hack/Hack.java |
1,411 | // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.firebase.database.core;
import static com.google.firebase.database.core.utilities.Utilities.hardAssert;
import androidx.annotation.NonNull;
import com.google.android.gms.tasks.Task;
import com.google.android.gms.tasks.TaskCompletionSource;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseException;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.InternalHelpers;
import com.google.firebase.database.MutableData;
import com.google.firebase.database.Query;
import com.google.firebase.database.Transaction;
import com.google.firebase.database.ValueEventListener;
import com.google.firebase.database.annotations.NotNull;
import com.google.firebase.database.connection.HostInfo;
import com.google.firebase.database.connection.ListenHashProvider;
import com.google.firebase.database.connection.PersistentConnection;
import com.google.firebase.database.connection.RequestResultCallback;
import com.google.firebase.database.core.persistence.NoopPersistenceManager;
import com.google.firebase.database.core.persistence.PersistenceManager;
import com.google.firebase.database.core.utilities.DefaultClock;
import com.google.firebase.database.core.utilities.DefaultRunLoop;
import com.google.firebase.database.core.utilities.OffsetClock;
import com.google.firebase.database.core.utilities.Tree;
import com.google.firebase.database.core.view.Event;
import com.google.firebase.database.core.view.EventRaiser;
import com.google.firebase.database.core.view.QuerySpec;
import com.google.firebase.database.logging.LogWrapper;
import com.google.firebase.database.snapshot.ChildKey;
import com.google.firebase.database.snapshot.EmptyNode;
import com.google.firebase.database.snapshot.IndexedNode;
import com.google.firebase.database.snapshot.Node;
import com.google.firebase.database.snapshot.NodeUtilities;
import com.google.firebase.database.snapshot.RangeMerge;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
public class Repo implements PersistentConnection.Delegate {
private static final String INTERRUPT_REASON = "repo_interrupt";
private static final int GET_TIMEOUT_MS = 3000;
private final RepoInfo repoInfo;
private final OffsetClock serverClock = new OffsetClock(new DefaultClock(), 0);
private PersistentConnection connection;
private SnapshotHolder infoData;
private SparseSnapshotTree onDisconnect;
private Tree<List<TransactionData>> transactionQueueTree;
private boolean hijackHash = false;
private final EventRaiser eventRaiser;
private final Context ctx;
private final LogWrapper operationLogger;
private final LogWrapper transactionLogger;
private final LogWrapper dataLogger;
public long dataUpdateCount = 0; // for testing.
private long nextWriteId = 1;
private SyncTree infoSyncTree;
private SyncTree serverSyncTree;
private FirebaseDatabase database;
private boolean loggedTransactionPersistenceWarning = false;
/**
 * Wires up loggers and the event raiser synchronously, then defers all expensive work
 * (connection, persistence) onto the run loop via {@link #deferredInitialization()}.
 */
Repo(RepoInfo repoInfo, Context ctx, FirebaseDatabase database) {
    this.repoInfo = repoInfo;
    this.ctx = ctx;
    this.database = database;
    this.eventRaiser = new EventRaiser(ctx);
    dataLogger = ctx.getLogger("DataOperation");
    transactionLogger = ctx.getLogger("Transaction");
    operationLogger = ctx.getLogger("RepoOperation");
    // Kick off any expensive additional initialization on the run loop.
    scheduleNow(
        new Runnable() {
          @Override
          public void run() {
            deferredInitialization();
          }
        });
}
/**
 * Defers any initialization that is potentially expensive (e.g. disk access) and must be run on
 * the run loop.
 *
 * <p>Order matters throughout: the connection is created first so the token-change listeners can
 * call {@code connection.refresh*Token()}, listeners are registered before
 * {@code connection.initialize()}, and the sync trees exist before writes are restored.</p>
 */
private void deferredInitialization() {
    HostInfo hostInfo = new HostInfo(repoInfo.host, repoInfo.namespace, repoInfo.secure);
    connection = ctx.newPersistentConnection(hostInfo, this);
    // Refresh the connection's auth token whenever the provider reports a change; callbacks are
    // delivered on the run loop's executor.
    this.ctx
        .getAuthTokenProvider()
        .addTokenChangeListener(
            ((DefaultRunLoop) ctx.getRunLoop()).getExecutorService(),
            new TokenProvider.TokenChangeListener() {
              @Override
              public void onTokenChange() {
                operationLogger.debug("Auth token changed, triggering auth token refresh");
                connection.refreshAuthToken();
              }

              @Override
              public void onTokenChange(String token) {
                operationLogger.debug("Auth token changed, triggering auth token refresh");
                connection.refreshAuthToken(token);
              }
            });
    // Same pattern for App Check tokens.
    this.ctx
        .getAppCheckTokenProvider()
        .addTokenChangeListener(
            ((DefaultRunLoop) ctx.getRunLoop()).getExecutorService(),
            new TokenProvider.TokenChangeListener() {
              @Override
              public void onTokenChange() {
                operationLogger.debug(
                    "App check token changed, triggering app check token refresh");
                connection.refreshAppCheckToken();
              }

              @Override
              public void onTokenChange(String token) {
                operationLogger.debug(
                    "App check token changed, triggering app check token refresh");
                connection.refreshAppCheckToken(token);
              }
            });
    // Open connection now so that by the time we are connected the deferred init has run
    // This relies on the fact that all callbacks run on repo's runloop.
    connection.initialize();
    PersistenceManager persistenceManager = ctx.getPersistenceManager(repoInfo.host);
    infoData = new SnapshotHolder();
    onDisconnect = new SparseSnapshotTree();
    transactionQueueTree = new Tree<List<TransactionData>>();
    // The ".info" tree is never persisted, hence the no-op persistence manager.
    infoSyncTree =
        new SyncTree(
            ctx,
            new NoopPersistenceManager(),
            new SyncTree.ListenProvider() {
              @Override
              public void startListening(
                  final QuerySpec query,
                  Tag tag,
                  final ListenHashProvider hash,
                  final SyncTree.CompletionListener onComplete) {
                scheduleNow(
                    new Runnable() {
                      @Override
                      public void run() {
                        // This is possibly a hack, but we have different semantics for .info
                        // endpoints. We don't raise null events on initial data...
                        final Node node = infoData.getNode(query.getPath());
                        if (!node.isEmpty()) {
                          List<? extends Event> infoEvents =
                              infoSyncTree.applyServerOverwrite(query.getPath(), node);
                          postEvents(infoEvents);
                          onComplete.onListenComplete(null);
                        }
                      }
                    });
              }

              @Override
              public void stopListening(QuerySpec query, Tag tag) {}
            });
    // The server tree proxies listens/unlistens through the persistent connection.
    serverSyncTree =
        new SyncTree(
            ctx,
            persistenceManager,
            new SyncTree.ListenProvider() {
              @Override
              public void startListening(
                  QuerySpec query,
                  Tag tag,
                  ListenHashProvider hash,
                  final SyncTree.CompletionListener onListenComplete) {
                connection.listen(
                    query.getPath().asList(),
                    query.getParams().getWireProtocolParams(),
                    hash,
                    tag != null ? tag.getTagNumber() : null,
                    new RequestResultCallback() {
                      @Override
                      public void onRequestResult(String optErrorCode, String optErrorMessage) {
                        DatabaseError error = fromErrorCode(optErrorCode, optErrorMessage);
                        List<? extends Event> events = onListenComplete.onListenComplete(error);
                        postEvents(events);
                      }
                    });
              }

              @Override
              public void stopListening(QuerySpec query, Tag tag) {
                connection.unlisten(
                    query.getPath().asList(), query.getParams().getWireProtocolParams());
              }
            });
    // Replay any writes persisted from a previous session, then seed the ".info" flags.
    restoreWrites(persistenceManager);
    updateInfo(Constants.DOT_INFO_AUTHENTICATED, false);
    updateInfo(Constants.DOT_INFO_CONNECTED, false);
}
/**
 * Replays user writes persisted from a previous session: each write is re-sent to the server and
 * re-applied to the local server sync tree so pending state survives restarts.
 *
 * <p>Writes must be loaded in strictly increasing write-id order; {@code nextWriteId} is advanced
 * past the largest restored id so new writes do not collide with restored ones.</p>
 */
private void restoreWrites(PersistenceManager persistenceManager) {
    List<UserWriteRecord> writes = persistenceManager.loadUserWrites();
    Map<String, Object> serverValues = ServerValues.generateServerValues(serverClock);
    long lastWriteId = Long.MIN_VALUE;
    for (final UserWriteRecord write : writes) {
        // Ack (and possibly warn) once the server responds to the replayed write.
        RequestResultCallback onComplete =
            new RequestResultCallback() {
              @Override
              public void onRequestResult(String optErrorCode, String optErrorMessage) {
                DatabaseError error = fromErrorCode(optErrorCode, optErrorMessage);
                warnIfWriteFailed("Persisted write", write.getPath(), error);
                ackWriteAndRerunTransactions(write.getWriteId(), write.getPath(), error);
              }
            };
        // Persisted writes are expected in ascending id order; anything else is corruption.
        if (lastWriteId >= write.getWriteId()) {
            throw new IllegalStateException("Write ids were not in order.");
        }
        lastWriteId = write.getWriteId();
        nextWriteId = write.getWriteId() + 1;
        if (write.isOverwrite()) {
            if (operationLogger.logsDebug()) {
                operationLogger.debug("Restoring overwrite with id " + write.getWriteId());
            }
            connection.put(write.getPath().asList(), write.getOverwrite().getValue(true), onComplete);
            // Resolve server values (e.g. timestamps) against the current clock before re-applying
            // locally; persist=false since the write is already on disk.
            Node resolved =
                ServerValues.resolveDeferredValueSnapshot(
                    write.getOverwrite(), serverSyncTree, write.getPath(), serverValues);
            serverSyncTree.applyUserOverwrite(
                write.getPath(),
                write.getOverwrite(),
                resolved,
                write.getWriteId(),
                /*visible=*/ true,
                /*persist=*/ false);
        } else {
            if (operationLogger.logsDebug()) {
                operationLogger.debug("Restoring merge with id " + write.getWriteId());
            }
            connection.merge(write.getPath().asList(), write.getMerge().getValue(true), onComplete);
            CompoundWrite resolved =
                ServerValues.resolveDeferredValueMerge(
                    write.getMerge(), serverSyncTree, write.getPath(), serverValues);
            serverSyncTree.applyUserMerge(
                write.getPath(), write.getMerge(), resolved, write.getWriteId(), /*persist=*/ false);
        }
    }
}
/** @return the FirebaseDatabase instance this repo serves. */
public FirebaseDatabase getDatabase() {
    return database;
}

@Override
public String toString() {
    return repoInfo.toString();
}

/** @return host/namespace information for this repo. */
public RepoInfo getRepoInfo() {
    return repoInfo;
}
// Regarding scheduleNow, scheduleDelayed, and postEvent: always go through these rather than
// touching the context directly. They re-initialize a previously shut-down context, which in
// practice guarantees at least one thread in the run loop's core pool when work is submitted.
public void scheduleNow(Runnable r) {
    ctx.requireStarted();
    ctx.getRunLoop().scheduleNow(r);
}

public void scheduleDelayed(Runnable r, long millis) {
    ctx.requireStarted();
    ctx.getRunLoop().schedule(r, millis);
}

public void postEvent(Runnable r) {
    ctx.requireStarted();
    ctx.getEventTarget().postEvent(r);
}
/** Raise the given events through the event raiser; a no-op for an empty list. */
private void postEvents(final List<? extends Event> events) {
    if (events.isEmpty()) {
        return;
    }
    eventRaiser.raiseEvents(events);
}

/** @return the current server-adjusted time in millis. */
public long getServerTime() {
    return serverClock.millis();
}

/** @return true if either sync tree currently has any listeners registered. */
boolean hasListeners() {
    return !infoSyncTree.isEmpty() || !serverSyncTree.isEmpty();
}
// PersistentConnection.Delegate methods
/**
 * Called by the persistent connection when data arrives from the server.
 *
 * <p>Applies the update (tagged/untagged, merge/overwrite) to the server sync tree, reruns any
 * transactions at the affected path when the update produced events, and raises those events.</p>
 *
 * @param pathSegments the affected path, as wire-format segments
 * @param message raw JSON payload; a {@code Map<String, Object>} when {@code isMerge} is true
 * @param isMerge whether this is a child-by-child merge rather than a full overwrite
 * @param optTag query tag for filtered listens, or null for a default listen
 */
@SuppressWarnings("unchecked") // For the cast on rawMergedData
@Override
public void onDataUpdate(
    List<String> pathSegments, Object message, boolean isMerge, Long optTag) {
    Path path = new Path(pathSegments);
    if (operationLogger.logsDebug()) {
        operationLogger.debug("onDataUpdate: " + path);
    }
    if (dataLogger.logsDebug()) {
        // Bug fix: the payload message was logged to operationLogger despite checking
        // dataLogger's level; route it to dataLogger, which this guard belongs to.
        dataLogger.debug("onDataUpdate: " + path + " " + message);
    }
    dataUpdateCount++; // For testing.
    List<? extends Event> events;
    try {
        if (optTag != null) {
            Tag tag = new Tag(optTag);
            if (isMerge) {
                Map<Path, Node> taggedChildren = new HashMap<Path, Node>();
                Map<String, Object> rawMergeData = (Map<String, Object>) message;
                for (Map.Entry<String, Object> entry : rawMergeData.entrySet()) {
                    Node newChildNode = NodeUtilities.NodeFromJSON(entry.getValue());
                    taggedChildren.put(new Path(entry.getKey()), newChildNode);
                }
                events = this.serverSyncTree.applyTaggedQueryMerge(path, taggedChildren, tag);
            } else {
                Node taggedSnap = NodeUtilities.NodeFromJSON(message);
                events = this.serverSyncTree.applyTaggedQueryOverwrite(path, taggedSnap, tag);
            }
        } else if (isMerge) {
            Map<Path, Node> changedChildren = new HashMap<Path, Node>();
            Map<String, Object> rawMergeData = (Map<String, Object>) message;
            for (Map.Entry<String, Object> entry : rawMergeData.entrySet()) {
                Node newChildNode = NodeUtilities.NodeFromJSON(entry.getValue());
                changedChildren.put(new Path(entry.getKey()), newChildNode);
            }
            events = this.serverSyncTree.applyServerMerge(path, changedChildren);
        } else {
            Node snap = NodeUtilities.NodeFromJSON(message);
            events = this.serverSyncTree.applyServerOverwrite(path, snap);
        }
        if (!events.isEmpty()) {
            // Since we have a listener outstanding for each transaction, receiving any events
            // is a proxy for some change having occurred.
            this.rerunTransactions(path);
        }
        postEvents(events);
    } catch (DatabaseException e) {
        operationLogger.error("FIREBASE INTERNAL ERROR", e);
    }
}
/**
 * PersistentConnection callback: the server sent a list of range merges for {@code pathSegments}.
 *
 * <p>Wire-level range merges are converted to core {@code RangeMerge} objects and applied to
 * the server sync tree, tagged or untagged depending on {@code tagNumber}. As with
 * {@link #onDataUpdate}, any resulting events trigger a transaction rerun before being raised.
 */
@Override
public void onRangeMergeUpdate(
    List<String> pathSegments,
    List<com.google.firebase.database.connection.RangeMerge> merges,
    Long tagNumber) {
  Path path = new Path(pathSegments);
  if (operationLogger.logsDebug()) {
    operationLogger.debug("onRangeMergeUpdate: " + path);
  }
  if (dataLogger.logsDebug()) {
    // Fix: the payload-bearing message is guarded by dataLogger.logsDebug() but was
    // previously emitted on operationLogger; route it to dataLogger as intended.
    dataLogger.debug("onRangeMergeUpdate: " + path + " " + merges);
  }
  dataUpdateCount++; // For testing.
  List<RangeMerge> parsedMerges = new ArrayList<RangeMerge>(merges.size());
  for (com.google.firebase.database.connection.RangeMerge merge : merges) {
    parsedMerges.add(new RangeMerge(merge));
  }
  List<? extends Event> events;
  if (tagNumber != null) {
    events = this.serverSyncTree.applyTaggedRangeMerges(path, parsedMerges, new Tag(tagNumber));
  } else {
    events = this.serverSyncTree.applyServerRangeMerges(path, parsedMerges);
  }
  if (events.size() > 0) {
    // Since we have a listener outstanding for each transaction, receiving any events
    // is a proxy for some change having occurred.
    this.rerunTransactions(path);
  }
  postEvents(events);
}
/**
 * Posts the user's completion callback (if any) with the given error and a reference to the
 * written location. Writes to a priority pseudo-child report against the parent location.
 */
void callOnComplete(
    final DatabaseReference.CompletionListener onComplete,
    final DatabaseError error,
    final Path path) {
  if (onComplete == null) {
    return;
  }
  ChildKey back = path.getBack();
  // ".priority" is not a real child; surface the parent ref instead.
  Path refPath = (back != null && back.isPriorityChildName()) ? path.getParent() : path;
  final DatabaseReference ref = InternalHelpers.createReference(this, refPath);
  postEvent(
      new Runnable() {
        @Override
        public void run() {
          onComplete.onComplete(error, ref);
        }
      });
}
/**
 * Acknowledges a completed user write (reverting it on failure), reruns transactions at the
 * path if the ack produced events, and raises those events.
 */
private void ackWriteAndRerunTransactions(long writeId, Path path, DatabaseError error) {
  if (error != null && error.getCode() == DatabaseError.WRITE_CANCELED) {
    // This write was already removed; nothing to acknowledge.
    return;
  }
  // Revert the write locally when the server rejected it.
  boolean revert = error != null;
  List<? extends Event> clearEvents =
      serverSyncTree.ackUserWrite(writeId, revert, /*persist=*/ true, serverClock);
  if (!clearEvents.isEmpty()) {
    rerunTransactions(path);
  }
  postEvents(clearEvents);
}
/**
 * Writes {@code newValueUnresolved} at {@code path}.
 *
 * <p>Server values (e.g. timestamps) are resolved against the current cached value, the write
 * is applied optimistically to the local sync tree (raising events immediately), and the raw
 * (unresolved) value is sent to the server. When the server responds, the write is acked or
 * reverted and the completion listener is invoked. Any transactions at or below the path are
 * aborted with OVERRIDDEN_BY_SET and rerun.
 *
 * @param path the location to write
 * @param newValueUnresolved the value, possibly containing deferred server values
 * @param onComplete optional listener invoked once the server acks or rejects the write
 */
public void setValue(
    final Path path,
    Node newValueUnresolved,
    final DatabaseReference.CompletionListener onComplete) {
  if (operationLogger.logsDebug()) {
    operationLogger.debug("set: " + path);
  }
  if (dataLogger.logsDebug()) {
    dataLogger.debug("set: " + path + " " + newValueUnresolved);
  }
  Map<String, Object> serverValues = ServerValues.generateServerValues(serverClock);
  // Resolve deferred values (e.g. ServerValue.TIMESTAMP) against the current cache.
  Node existing = serverSyncTree.calcCompleteEventCache(path, new ArrayList<>());
  Node newValue =
      ServerValues.resolveDeferredValueSnapshot(newValueUnresolved, existing, serverValues);
  final long writeId = this.getNextWriteId();
  // Apply locally first so listeners see the write immediately.
  List<? extends Event> events =
      this.serverSyncTree.applyUserOverwrite(
          path, newValueUnresolved, newValue, writeId, /*visible=*/ true, /*persist=*/ true);
  this.postEvents(events);
  // Send the raw (unresolved) value; the server resolves server values itself.
  connection.put(
      path.asList(),
      newValueUnresolved.getValue(true),
      new RequestResultCallback() {
        @Override
        public void onRequestResult(String optErrorCode, String optErrorMessage) {
          DatabaseError error = fromErrorCode(optErrorCode, optErrorMessage);
          warnIfWriteFailed("setValue", path, error);
          ackWriteAndRerunTransactions(writeId, path, error);
          callOnComplete(onComplete, error, path);
        }
      });
  // A plain set overrides any transaction at or below this path.
  Path affectedPath = abortTransactions(path, DatabaseError.OVERRIDDEN_BY_SET);
  this.rerunTransactions(affectedPath);
}
/**
* The purpose of `getValue` is to return the latest known value satisfying `query`.
*
* <p>If the client is connected, this method will probe for in-memory cached values (active
* listeners). If none are found, the client will reach out to the server and ask for the most
* up-to-date value.
*
* <p>If the client is not connected, this method will check for in-memory cached values (active
* listeners). If none are found, the client will initiate a time-limited connection attempt to
* the server so that it can ask for the latest value. If the client is unable to connect, it will
* fall-back to the persistence cache value.
*
* <p>If, after exhausting all possible options (active-listener cache, server request,
* persistence cache), the client is unable to provide a guess for the latest value for a query,
* it will surface an "offline" error.
*
* <p>Note that `getValue` updates the client's persistence cache whenever it's able to retrieve a
* new server value. It does this by installing a short-lived tracked query.
*
* @param query - The query to surface a value for.
*/
public Task<DataSnapshot> getValue(Query query) {
  TaskCompletionSource<DataSnapshot> source = new TaskCompletionSource<>();
  final Repo repo = this;
  this.scheduleNow(
      new Runnable() {
        @Override
        public void run() {
          // Always check active-listener in-memory caches first. These are always at least as
          // up to date as the persistence cache
          Node serverValue = serverSyncTree.getServerValue(query.getSpec());
          if (serverValue != null) {
            source.setResult(
                InternalHelpers.createDataSnapshot(
                    query.getRef(), IndexedNode.from(serverValue)));
            return;
          }
          serverSyncTree.setQueryActive(query.getSpec());
          final DataSnapshot persisted = serverSyncTree.persistenceServerCache(query);
          if (persisted.exists()) {
            // Prefer the locally persisted value if the server is not responsive.
            // trySetResult: a faster server response may already have completed the task.
            scheduleDelayed(() -> source.trySetResult(persisted), GET_TIMEOUT_MS);
          }
          connection
              .get(query.getPath().asList(), query.getSpec().getParams().getWireProtocolParams())
              .addOnCompleteListener(
                  ((DefaultRunLoop) ctx.getRunLoop()).getExecutorService(),
                  (@NonNull Task<Object> task) -> {
                    // The persistence-fallback timeout may have fired already; don't override.
                    if (source.getTask().isComplete()) {
                      return;
                    }
                    if (!task.isSuccessful()) {
                      // Server failed: fall back to the persisted value, else surface the error.
                      if (persisted.exists()) {
                        source.setResult(persisted);
                      } else {
                        source.setException(Objects.requireNonNull(task.getException()));
                      }
                    } else {
                      /*
                       * We need to replicate the behavior that occurs when running `once()`. In other words,
                       * we need to create a new eventRegistration, register it with a view and then
                       * overwrite the data at that location, and then remove the view.
                       */
                      Node serverNode = NodeUtilities.NodeFromJSON(task.getResult());
                      QuerySpec spec = query.getSpec();
                      // EventRegistrations require a listener to be attached, so a dummy
                      // ValueEventListener was created.
                      keepSynced(spec, /*keep=*/ true, /*skipDedup=*/ true);
                      List<? extends Event> events;
                      if (spec.loadsAllData()) {
                        events = serverSyncTree.applyServerOverwrite(spec.getPath(), serverNode);
                      } else {
                        events =
                            serverSyncTree.applyTaggedQueryOverwrite(
                                spec.getPath(),
                                serverNode,
                                getServerSyncTree().tagForQuery(spec));
                      }
                      repo.postEvents(
                          events); // to ensure that other listeners end up getting their cached
                      // events.
                      source.setResult(
                          InternalHelpers.createDataSnapshot(
                              query.getRef(),
                              IndexedNode.from(serverNode, query.getSpec().getIndex())));
                      // Tear down the short-lived tracked query installed above.
                      keepSynced(spec, /*keep=*/ false, /*skipDedup=*/ true);
                    }
                  });
        }
      });
  return source.getTask();
}
/**
 * Applies a multi-location update rooted at {@code path}.
 *
 * <p>Mirrors {@link #setValue}: resolve server values, apply the merge optimistically to the
 * local sync tree, send the raw update to the server, then ack/revert on response. Each updated
 * child path also aborts and reruns transactions it overrides.
 *
 * @param path root location of the update
 * @param updates parsed child writes relative to {@code path}
 * @param onComplete optional listener invoked once the server responds
 * @param unParsedUpdates the caller-supplied raw map, sent to the server verbatim
 */
public void updateChildren(
    final Path path,
    CompoundWrite updates,
    final DatabaseReference.CompletionListener onComplete,
    Map<String, Object> unParsedUpdates) {
  if (operationLogger.logsDebug()) {
    operationLogger.debug("update: " + path);
  }
  if (dataLogger.logsDebug()) {
    dataLogger.debug("update: " + path + " " + unParsedUpdates);
  }
  if (updates.isEmpty()) {
    if (operationLogger.logsDebug()) {
      operationLogger.debug("update called with no changes. No-op");
    }
    // dispatch on complete
    callOnComplete(onComplete, null, path);
    return;
  }
  // Start with our existing data and merge each child into it.
  Map<String, Object> serverValues = ServerValues.generateServerValues(serverClock);
  CompoundWrite resolved =
      ServerValues.resolveDeferredValueMerge(updates, serverSyncTree, path, serverValues);
  final long writeId = this.getNextWriteId();
  // Apply locally first so listeners see the merge immediately.
  List<? extends Event> events =
      this.serverSyncTree.applyUserMerge(path, updates, resolved, writeId, /*persist=*/ true);
  this.postEvents(events);
  // TODO: DatabaseReference.CompletionListener isn't really appropriate (the DatabaseReference
  // param is meaningless).
  connection.merge(
      path.asList(),
      unParsedUpdates,
      new RequestResultCallback() {
        @Override
        public void onRequestResult(String optErrorCode, String optErrorMessage) {
          DatabaseError error = fromErrorCode(optErrorCode, optErrorMessage);
          warnIfWriteFailed("updateChildren", path, error);
          ackWriteAndRerunTransactions(writeId, path, error);
          callOnComplete(onComplete, error, path);
        }
      });
  // Each child write overrides transactions at or below its own path.
  for (Entry<Path, Node> update : updates) {
    Path pathFromRoot = path.child(update.getKey());
    Path affectedPath = abortTransactions(pathFromRoot, DatabaseError.OVERRIDDEN_BY_SET);
    rerunTransactions(affectedPath);
  }
}
/**
 * Drops every pending user write: reverts them locally (raising the resulting events),
 * cancels all transactions, and tells the connection to discard queued writes.
 */
public void purgeOutstandingWrites() {
  if (operationLogger.logsDebug()) {
    operationLogger.debug("Purging writes");
  }
  // Revert all queued writes locally and surface the resulting events.
  postEvents(serverSyncTree.removeAllWrites());
  // Cancel every transaction anywhere in the tree.
  abortTransactions(Path.getEmptyPath(), DatabaseError.WRITE_CANCELED);
  // Drop anything still queued at the wire layer.
  connection.purgeOutstandingWrites();
}
/**
 * Removes an event registration from the appropriate sync tree (/.info registrations live in
 * the info tree). Removal without a cancel error should not raise events, but the returned
 * list is forwarded anyway to stay future-proof.
 */
public void removeEventCallback(@NotNull EventRegistration eventRegistration) {
  ChildKey front = eventRegistration.getQuerySpec().getPath().getFront();
  List<Event> events =
      Constants.DOT_INFO.equals(front)
          ? infoSyncTree.removeEventRegistration(eventRegistration)
          : serverSyncTree.removeEventRegistration(eventRegistration);
  this.postEvents(events);
}
/**
 * Registers a server-side onDisconnect set at {@code path}. On success the value is also
 * remembered locally so it can be replayed as a local event when the connection drops.
 */
public void onDisconnectSetValue(
    final Path path, final Node newValue, final DatabaseReference.CompletionListener onComplete) {
  connection.onDisconnectPut(
      path.asList(),
      newValue.getValue(true),
      new RequestResultCallback() {
        @Override
        public void onRequestResult(String optErrorCode, String optErrorMessage) {
          DatabaseError error = fromErrorCode(optErrorCode, optErrorMessage);
          warnIfWriteFailed("onDisconnect().setValue", path, error);
          if (error == null) {
            // Track locally so runOnDisconnectEvents() can apply it on disconnect.
            onDisconnect.remember(path, newValue);
          }
          callOnComplete(onComplete, error, path);
        }
      });
}
/**
 * Registers a server-side onDisconnect merge at {@code path}. On success each child value is
 * remembered locally for replay when the connection drops.
 *
 * @param newChildren parsed child writes, remembered locally on success
 * @param unParsedUpdates the raw caller-supplied map, sent to the server verbatim
 */
public void onDisconnectUpdate(
    final Path path,
    final Map<Path, Node> newChildren,
    final DatabaseReference.CompletionListener listener,
    Map<String, Object> unParsedUpdates) {
  connection.onDisconnectMerge(
      path.asList(),
      unParsedUpdates,
      new RequestResultCallback() {
        @Override
        public void onRequestResult(String optErrorCode, String optErrorMessage) {
          DatabaseError error = fromErrorCode(optErrorCode, optErrorMessage);
          warnIfWriteFailed("onDisconnect().updateChildren", path, error);
          if (error == null) {
            // Track each child locally so runOnDisconnectEvents() can apply them.
            for (Map.Entry<Path, Node> entry : newChildren.entrySet()) {
              onDisconnect.remember(path.child(entry.getKey()), entry.getValue());
            }
          }
          callOnComplete(listener, error, path);
        }
      });
}
/**
 * Cancels any registered onDisconnect operations at {@code path}, both on the server and in
 * the local tracking tree.
 */
public void onDisconnectCancel(
    final Path path, final DatabaseReference.CompletionListener onComplete) {
  connection.onDisconnectCancel(
      path.asList(),
      new RequestResultCallback() {
        @Override
        public void onRequestResult(String optErrorCode, String optErrorMessage) {
          DatabaseError error = fromErrorCode(optErrorCode, optErrorMessage);
          if (error == null) {
            // Keep local tracking in sync with the server-side cancellation.
            onDisconnect.forget(path);
          }
          callOnComplete(onComplete, error, path);
        }
      });
}
/** Connection established: flip /.info/connected to true. */
@Override
public void onConnect() {
  onServerInfoUpdate(Constants.DOT_INFO_CONNECTED, true);
}
/** Connection lost: flip /.info/connected to false and replay tracked onDisconnect writes. */
@Override
public void onDisconnect() {
  onServerInfoUpdate(Constants.DOT_INFO_CONNECTED, false);
  runOnDisconnectEvents();
}
/**
 * Publishes the given flag to /.info/authenticated.
 *
 * <p>NOTE(review): the parameter is named {@code connectionOk} but it feeds the
 * "authenticated" info key — confirm the caller actually passes auth status here.
 */
@Override
public void onConnectionStatus(boolean connectionOk) {
  onServerInfoUpdate(Constants.DOT_INFO_AUTHENTICATED, connectionOk);
}
/** Applies a single server metadata update under /.info. */
public void onServerInfoUpdate(ChildKey key, Object value) {
  updateInfo(key, value);
}
/** Applies a batch of server metadata updates under /.info, one key at a time. */
@Override
public void onServerInfoUpdate(Map<String, Object> updates) {
  for (Map.Entry<String, Object> update : updates.entrySet()) {
    ChildKey key = ChildKey.fromString(update.getKey());
    updateInfo(key, update.getValue());
  }
}
/** Pauses the persistent connection (paired with {@link #resume}). */
void interrupt() {
  connection.interrupt(INTERRUPT_REASON);
}
/** Resumes a connection previously paused by {@link #interrupt}. */
void resume() {
  connection.resume(INTERRUPT_REASON);
}
/**
 * Registers an event callback with the appropriate sync tree (/.info registrations go to the
 * info tree) and raises any initial cached events.
 */
public void addEventCallback(@NotNull EventRegistration eventRegistration) {
  // Null-safe: DOT_INFO is a constant, so equals() handles a null front key.
  ChildKey front = eventRegistration.getQuerySpec().getPath().getFront();
  List<? extends Event> events =
      Constants.DOT_INFO.equals(front)
          ? this.infoSyncTree.addEventRegistration(eventRegistration)
          : this.serverSyncTree.addEventRegistration(eventRegistration);
  this.postEvents(events);
}
/** Convenience overload of {@code keepSynced(query, keep, skipDedup)} with deduping enabled. */
public void keepSynced(QuerySpec query, boolean keep) {
  keepSynced(query, keep, /*skipDedup=*/ false);
}
/**
 * Marks a query to be kept in sync (or stops doing so). /.info paths cannot be kept synced.
 *
 * @param skipDedup when true, bypasses duplicate-registration detection in the sync tree
 */
public void keepSynced(QuerySpec query, boolean keep, final boolean skipDedup) {
  hardAssert(query.getPath().isEmpty() || !query.getPath().getFront().equals(Constants.DOT_INFO));
  serverSyncTree.keepSynced(query, keep, skipDedup);
}
/** Exposes the underlying persistent connection (package-private, e.g. for tests). */
PersistentConnection getConnection() {
  return connection;
}
/**
 * Writes a server metadata value under /.info/{childKey} and raises events on the info tree.
 * A servertime offset update additionally adjusts the offset clock.
 */
private void updateInfo(ChildKey childKey, Object value) {
  if (childKey.equals(Constants.DOT_INFO_SERVERTIME_OFFSET)) {
    // Keep getServerTime() in sync with the server's clock.
    serverClock.setOffset((Long) value);
  }
  Path path = new Path(Constants.DOT_INFO, childKey);
  try {
    Node node = NodeUtilities.NodeFromJSON(value);
    infoData.update(path, node);
    List<? extends Event> events = this.infoSyncTree.applyServerOverwrite(path, node);
    this.postEvents(events);
  } catch (DatabaseException e) {
    operationLogger.error("Failed to parse info update", e);
  }
}
/** Returns the next monotonically increasing write id. */
private long getNextWriteId() {
  return this.nextWriteId++;
}
/**
 * Replays all remembered onDisconnect writes against the local sync tree (resolving deferred
 * server values first), aborting/rerunning transactions each write overrides, then clears the
 * tracking tree and raises all accumulated events at once.
 */
private void runOnDisconnectEvents() {
  Map<String, Object> serverValues = ServerValues.generateServerValues(serverClock);
  final List<Event> events = new ArrayList<Event>();
  onDisconnect.forEachTree(
      Path.getEmptyPath(),
      new SparseSnapshotTree.SparseSnapshotTreeVisitor() {
        @Override
        public void visitTree(Path prefixPath, Node node) {
          Node existing = serverSyncTree.calcCompleteEventCache(prefixPath, new ArrayList<>());
          Node resolvedNode =
              ServerValues.resolveDeferredValueSnapshot(node, existing, serverValues);
          events.addAll(serverSyncTree.applyServerOverwrite(prefixPath, resolvedNode));
          // An onDisconnect write behaves like a set: it overrides transactions.
          Path affectedPath = abortTransactions(prefixPath, DatabaseError.OVERRIDDEN_BY_SET);
          rerunTransactions(affectedPath);
        }
      });
  // All writes replayed; start a fresh tracking tree for the next session.
  onDisconnect = new SparseSnapshotTree();
  this.postEvents(events);
}
/**
 * Logs a warning for a failed write, except for DATA_STALE and WRITE_CANCELED, which are
 * normal, expected errors during transaction processing.
 */
private void warnIfWriteFailed(String writeType, Path path, DatabaseError error) {
  if (error == null) {
    return;
  }
  int code = error.getCode();
  if (code == DatabaseError.DATA_STALE || code == DatabaseError.WRITE_CANCELED) {
    return;
  }
  operationLogger.warn(writeType + " at " + path.toString() + " failed: " + error.toString());
}
// Transaction code
/**
 * If a transaction does not succeed after 25 retries, we abort it. Among other things this ensure
 * that if there's ever a bug causing a mismatch between client / server hashes for some data, we
 * won't retry indefinitely.
 */
private static final int TRANSACTION_MAX_RETRIES = 25;
// Wire status string used when a transaction exceeds TRANSACTION_MAX_RETRIES.
private static final String TRANSACTION_TOO_MANY_RETRIES = "maxretries";
// Wire status string used when a plain set() clobbers a pending transaction.
private static final String TRANSACTION_OVERRIDE_BY_SET = "overriddenBySet";
/** Lifecycle states of a single queued transaction. */
private enum TransactionStatus {
  // Transaction has been created but not yet run against local data.
  INITIALIZING,
  // We've run the transaction and updated transactionResultData_ with the result, but it isn't
  // currently sent to the server.
  // A transaction will go from RUN -> SENT -> RUN if it comes back from the server as rejected
  // due to mismatched hash.
  RUN,
  // We've run the transaction and sent it to the server and it's currently outstanding (hasn't
  // come back as accepted or rejected yet).
  SENT,
  // Temporary state used to mark completed transactions (whether successful or aborted). The
  // transaction will be removed when we get a chance to prune completed ones.
  COMPLETED,
  // Used when an already-sent transaction needs to be aborted (e.g. due to a conflicting set()
  // call that was made). If it comes back as unsuccessful, we'll abort it.
  SENT_NEEDS_ABORT,
  // Temporary state used to mark transactions that need to be aborted.
  NEEDS_ABORT
};
// Monotonic sequence number assigning a global FIFO order across all transactions.
private long transactionOrder = 0;
/**
 * Bookkeeping for one queued transaction: its handler, dummy listener, lifecycle status,
 * global order, retry count, current write id, and the input/output snapshots of its most
 * recent run. Ordered by creation order so queues replay FIFO.
 */
private static class TransactionData implements Comparable<TransactionData> {
  private Path path;
  private Transaction.Handler handler;
  // Dummy listener keeping a view alive at `path` while the transaction is outstanding.
  private ValueEventListener outstandingListener;
  private TransactionStatus status;
  // Global FIFO order assigned at creation (see transactionOrder).
  private long order;
  private boolean applyLocally;
  private int retryCount;
  private DatabaseError abortReason;
  private long currentWriteId;
  // Snapshots from the most recent run; null until the handler has been invoked.
  private Node currentInputSnapshot;
  private Node currentOutputSnapshotRaw;
  private Node currentOutputSnapshotResolved;
  private TransactionData(
      Path path,
      Transaction.Handler handler,
      ValueEventListener outstandingListener,
      TransactionStatus status,
      boolean applyLocally,
      long order) {
    this.path = path;
    this.handler = handler;
    this.outstandingListener = outstandingListener;
    this.status = status;
    this.retryCount = 0;
    this.applyLocally = applyLocally;
    this.order = order;
    this.abortReason = null;
    this.currentInputSnapshot = null;
    this.currentOutputSnapshotRaw = null;
    this.currentOutputSnapshotResolved = null;
  }
  @Override
  public int compareTo(TransactionData o) {
    // Long.compare replaces the hand-rolled three-way comparison; same contract.
    return Long.compare(order, o.order);
  }
}
/**
 * Starts a transaction at {@code path}: attaches a dummy listener (so server data flows),
 * runs the handler once against the latest local state, and either aborts (posting the
 * completion callback) or queues the result, applies it optimistically, and tries to send
 * all ready transactions.
 *
 * @param applyLocally when true, the speculative result is made visible to local listeners
 */
public void startTransaction(Path path, final Transaction.Handler handler, boolean applyLocally) {
  if (operationLogger.logsDebug()) {
    operationLogger.debug("transaction: " + path);
  }
  if (dataLogger.logsDebug()) {
    // Fix: this branch is guarded by dataLogger.logsDebug() but previously logged the same
    // message on operationLogger; emit it on dataLogger as intended.
    dataLogger.debug("transaction: " + path);
  }
  if (this.ctx.isPersistenceEnabled() && !loggedTransactionPersistenceWarning) {
    loggedTransactionPersistenceWarning = true;
    transactionLogger.info(
        "runTransaction() usage detected while persistence is enabled. Please be aware that "
            + "transactions *will not* be persisted across database restarts. See "
            + "https://www.firebase.com/docs/android/guide/offline-capabilities.html"
            + "#section-handling-transactions-offline for more details.");
  }
  // make sure we're listening on this node
  // Note: we can't do this asynchronously. To preserve event ordering,
  // it has to be done in this block. This is ok, this block is
  // guaranteed to be our own event loop
  DatabaseReference watchRef = InternalHelpers.createReference(this, path);
  ValueEventListener listener =
      new ValueEventListener() {
        @Override
        public void onDataChange(DataSnapshot snapshot) {
          // No-op. We don't care, this is just to make sure we have a listener outstanding
        }
        @Override
        public void onCancelled(DatabaseError error) {
          // Also a no-op? We'll cancel the transaction in this case
        }
      };
  addEventCallback(new ValueEventRegistration(this, listener, watchRef.getSpec()));
  TransactionData transaction =
      new TransactionData(
          path,
          handler,
          listener,
          TransactionStatus.INITIALIZING,
          applyLocally,
          nextTransactionOrder());
  // Run transaction initially.
  Node currentState = this.getLatestState(path);
  transaction.currentInputSnapshot = currentState;
  MutableData mutableCurrent = InternalHelpers.createMutableData(currentState);
  DatabaseError error = null;
  Transaction.Result result;
  try {
    result = handler.doTransaction(mutableCurrent);
    if (result == null) {
      throw new NullPointerException("Transaction returned null as result");
    }
  } catch (Throwable e) {
    // User code threw: convert to an error result and abort below.
    operationLogger.error("Caught Throwable.", e);
    error = DatabaseError.fromException(e);
    result = Transaction.abort();
  }
  if (!result.isSuccess()) {
    // Abort the transaction
    transaction.currentOutputSnapshotRaw = null;
    transaction.currentOutputSnapshotResolved = null;
    final DatabaseError innerClassError = error;
    final DataSnapshot snap =
        InternalHelpers.createDataSnapshot(
            watchRef, IndexedNode.from(transaction.currentInputSnapshot));
    postEvent(
        new Runnable() {
          @Override
          public void run() {
            handler.onComplete(innerClassError, false, snap);
          }
        });
  } else {
    // Mark as run and add to our queue.
    transaction.status = TransactionStatus.RUN;
    Tree<List<TransactionData>> queueNode = transactionQueueTree.subTree(path);
    List<TransactionData> nodeQueue = queueNode.getValue();
    if (nodeQueue == null) {
      nodeQueue = new ArrayList<TransactionData>();
    }
    nodeQueue.add(transaction);
    queueNode.setValue(nodeQueue);
    // Resolve deferred server values against the input snapshot before applying locally.
    Map<String, Object> serverValues = ServerValues.generateServerValues(serverClock);
    Node newNodeUnresolved = result.getNode();
    Node newNode =
        ServerValues.resolveDeferredValueSnapshot(
            newNodeUnresolved, transaction.currentInputSnapshot, serverValues);
    transaction.currentOutputSnapshotRaw = newNodeUnresolved;
    transaction.currentOutputSnapshotResolved = newNode;
    transaction.currentWriteId = this.getNextWriteId();
    // Speculatively apply the result; visibility depends on applyLocally.
    List<? extends Event> events =
        this.serverSyncTree.applyUserOverwrite(
            path,
            newNodeUnresolved,
            newNode,
            transaction.currentWriteId,
            /*visible=*/ applyLocally,
            /*persist=*/ false);
    this.postEvents(events);
    sendAllReadyTransactions();
  }
}
/** Returns the effective current value at {@code path}, excluding no pending writes. */
private Node getLatestState(Path path) {
  return this.getLatestState(path, new ArrayList<Long>());
}
/**
 * Returns the effective current value at {@code path}, ignoring the given pending write ids;
 * an absent cached value is reported as the empty node.
 */
private Node getLatestState(Path path, List<Long> excludeWriteIds) {
  Node state = this.serverSyncTree.calcCompleteEventCache(path, excludeWriteIds);
  return state != null ? state : EmptyNode.Empty();
}
/**
 * Test hook: when enabled, transactions send the literal hash "badhash" instead of the real
 * state hash (see sendTransactionQueue), forcing the server to reject them as stale.
 */
public void setHijackHash(boolean hijackHash) {
  this.hijackHash = hijackHash;
}
/**
 * Prunes finished transactions from the whole queue tree, then sends every queue whose
 * transactions are all in the RUN state.
 */
private void sendAllReadyTransactions() {
  pruneCompletedTransactions(transactionQueueTree);
  sendReadyTransactions(transactionQueueTree);
}
/**
 * Recursively sends transaction queues that are ready: a node with a queue sends the combined
 * queue of itself and all descendants once every entry is in the RUN state; a node without a
 * queue recurses into its children.
 */
private void sendReadyTransactions(Tree<List<TransactionData>> node) {
  List<TransactionData> queue = node.getValue();
  if (queue != null) {
    queue = buildTransactionQueue(node);
    hardAssert(queue.size() > 0); // Sending zero length transaction queue
    // Primitive flag instead of the original boxed Boolean (avoids needless autoboxing).
    boolean allRun = true;
    for (TransactionData transaction : queue) {
      if (transaction.status != TransactionStatus.RUN) {
        allRun = false;
        break;
      }
    }
    // If they're all run (and not sent), we can send them. Else, we must wait.
    if (allRun) {
      sendTransactionQueue(queue, node.getPath());
    }
  } else if (node.hasChildren()) {
    node.forEachChild(
        new Tree.TreeVisitor<List<TransactionData>>() {
          @Override
          public void visitTree(Tree<List<TransactionData>> tree) {
            sendReadyTransactions(tree);
          }
        });
  }
}
/**
 * Sends a queue of RUN transactions rooted at {@code path} as a single compareAndPut.
 *
 * <p>The put payload is the latest state at {@code path} (ignoring these transactions' own
 * writes) with each transaction's raw output spliced in, guarded by the hash of that latest
 * state. On success each transaction completes, its write is acked, its callback queued, and
 * its dummy listener removed; on DATA_STALE transactions go back to RUN (or NEEDS_ABORT if
 * already marked); on other errors all are marked NEEDS_ABORT. Either way transactions are
 * rerun afterwards.
 */
private void sendTransactionQueue(final List<TransactionData> queue, final Path path) {
  // Mark transactions as sent and increment retry count!
  List<Long> setsToIgnore = new ArrayList<Long>();
  for (TransactionData txn : queue) {
    setsToIgnore.add(txn.currentWriteId);
  }
  Node latestState = this.getLatestState(path, setsToIgnore);
  Node snapToSend = latestState;
  // "badhash" forces a server-side stale rejection; used by tests via setHijackHash.
  String latestHash = "badhash";
  if (!hijackHash) {
    latestHash = latestState.getHash();
  }
  for (TransactionData txn : queue) {
    hardAssert(
        txn.status
            == TransactionStatus.RUN); // sendTransactionQueue: items in queue should all be run.
    txn.status = TransactionStatus.SENT;
    txn.retryCount++;
    Path relativePath = Path.getRelative(path, txn.path);
    // If we've gotten to this point, the output snapshot must be defined.
    snapToSend = snapToSend.updateChild(relativePath, txn.currentOutputSnapshotRaw);
  }
  Object dataToSend = snapToSend.getValue(true);
  final Repo repo = this;
  // Send the put.
  connection.compareAndPut(
      path.asList(),
      dataToSend,
      latestHash,
      new RequestResultCallback() {
        @Override
        public void onRequestResult(String optErrorCode, String optErrorMessage) {
          DatabaseError error = fromErrorCode(optErrorCode, optErrorMessage);
          warnIfWriteFailed("Transaction", path, error);
          List<Event> events = new ArrayList<Event>();
          if (error == null) {
            // Server accepted the compound write: complete every transaction.
            List<Runnable> callbacks = new ArrayList<Runnable>();
            for (final TransactionData txn : queue) {
              txn.status = TransactionStatus.COMPLETED;
              events.addAll(
                  serverSyncTree.ackUserWrite(
                      txn.currentWriteId, /*revert=*/ false, /*persist=*/ false, serverClock));
              // We never unset the output snapshot, and given that this
              // transaction is complete, it should be set
              Node node = txn.currentOutputSnapshotResolved;
              final DataSnapshot snap =
                  InternalHelpers.createDataSnapshot(
                      InternalHelpers.createReference(repo, txn.path), IndexedNode.from(node));
              callbacks.add(
                  new Runnable() {
                    @Override
                    public void run() {
                      txn.handler.onComplete(null, true, snap);
                    }
                  });
              // Remove the outstanding value listener that we added
              removeEventCallback(
                  new ValueEventRegistration(
                      Repo.this,
                      txn.outstandingListener,
                      QuerySpec.defaultQueryAtPath(txn.path)));
            }
            // Now remove the completed transactions
            pruneCompletedTransactions(transactionQueueTree.subTree(path));
            // There may be pending transactions that we can now send
            sendAllReadyTransactions();
            repo.postEvents(events);
            // Finally, run the callbacks
            for (int i = 0; i < callbacks.size(); ++i) {
              postEvent(callbacks.get(i));
            }
          } else {
            // transactions are no longer sent. Update their status appropriately
            if (error.getCode() == DatabaseError.DATA_STALE) {
              // Hash mismatch: rerun against fresh data (unless already flagged for abort).
              for (TransactionData transaction : queue) {
                if (transaction.status == TransactionStatus.SENT_NEEDS_ABORT) {
                  transaction.status = TransactionStatus.NEEDS_ABORT;
                } else {
                  transaction.status = TransactionStatus.RUN;
                }
              }
            } else {
              for (TransactionData transaction : queue) {
                transaction.status = TransactionStatus.NEEDS_ABORT;
                transaction.abortReason = error;
              }
            }
            // since we reverted mergedData, we should re-run any remaining
            // transactions and raise events
            rerunTransactions(path);
          }
        }
      });
}
/**
 * Recursively removes COMPLETED transactions from this queue-tree node and all its children,
 * clearing a node's value entirely when its queue becomes empty.
 */
private void pruneCompletedTransactions(Tree<List<TransactionData>> node) {
  List<TransactionData> queue = node.getValue();
  if (queue != null) {
    // removeIf replaces the original manual index-walk removal loop.
    queue.removeIf(transaction -> transaction.status == TransactionStatus.COMPLETED);
    // An empty queue is represented as a null value so the tree node can be pruned.
    node.setValue(queue.isEmpty() ? null : queue);
  }
  node.forEachChild(
      new Tree.TreeVisitor<List<TransactionData>>() {
        @Override
        public void visitTree(Tree<List<TransactionData>> tree) {
          pruneCompletedTransactions(tree);
        }
      });
}
/** Returns the next global transaction order number (FIFO across all paths). */
private long nextTransactionOrder() {
  return transactionOrder++;
}
/**
 * Reruns all transactions under the root-most queue-tree node containing {@code changedPath}
 * and returns that node's path (the full span of affected locations).
 */
private Path rerunTransactions(Path changedPath) {
  Tree<List<TransactionData>> rootMostTransactionNode = getAncestorTransactionNode(changedPath);
  Path path = rootMostTransactionNode.getPath();
  List<TransactionData> queue = buildTransactionQueue(rootMostTransactionNode);
  rerunTransactionQueue(queue, path);
  return path;
}
/**
 * Reruns every transaction in {@code queue} (all rooted under {@code path}) against the
 * latest local state.
 *
 * <p>NEEDS_ABORT entries are reverted; RUN entries that exceeded the retry limit are aborted
 * with "maxretries"; remaining RUN entries re-invoke the user handler, producing a new write
 * id whose result replaces the old speculative write. Aborted transactions are completed,
 * their dummy listeners scheduled for removal, and their callbacks queued. After pruning,
 * callbacks fire and any newly ready queues are sent.
 */
private void rerunTransactionQueue(List<TransactionData> queue, Path path) {
  if (queue.isEmpty()) {
    return; // Nothing to do!
  }
  // Queue up the callbacks and fire them after cleaning up all of our transaction state, since
  // the callback could trigger more transactions or sets
  List<Runnable> callbacks = new ArrayList<Runnable>();
  // Ignore, by default, all of the sets in this queue, since we're re-running all of them.
  // However, we want to include the results of new sets triggered as part of this re-run, so we
  // don't want to ignore a range, just these specific sets.
  List<Long> setsToIgnore = new ArrayList<Long>();
  for (TransactionData transaction : queue) {
    setsToIgnore.add(transaction.currentWriteId);
  }
  for (final TransactionData transaction : queue) {
    Path relativePath = Path.getRelative(path, transaction.path);
    boolean abortTransaction = false;
    DatabaseError abortReason = null;
    List<Event> events = new ArrayList<Event>();
    hardAssert(relativePath != null); // rerunTransactionQueue: relativePath should not be null.
    if (transaction.status == TransactionStatus.NEEDS_ABORT) {
      abortTransaction = true;
      abortReason = transaction.abortReason;
      if (abortReason.getCode() != DatabaseError.WRITE_CANCELED) {
        // WRITE_CANCELED means the write was already removed; anything else must be reverted.
        events.addAll(
            serverSyncTree.ackUserWrite(
                transaction.currentWriteId, /*revert=*/ true, /*persist=*/ false, serverClock));
      }
    } else if (transaction.status == TransactionStatus.RUN) {
      if (transaction.retryCount >= TRANSACTION_MAX_RETRIES) {
        // Give up after too many hash-mismatch retries (see TRANSACTION_MAX_RETRIES).
        abortTransaction = true;
        abortReason = DatabaseError.fromStatus(TRANSACTION_TOO_MANY_RETRIES);
        events.addAll(
            serverSyncTree.ackUserWrite(
                transaction.currentWriteId, /*revert=*/ true, /*persist=*/ false, serverClock));
      } else {
        // This code reruns a transaction
        Node currentNode = this.getLatestState(transaction.path, setsToIgnore);
        transaction.currentInputSnapshot = currentNode;
        MutableData mutableCurrent = InternalHelpers.createMutableData(currentNode);
        DatabaseError error = null;
        Transaction.Result result;
        try {
          result = transaction.handler.doTransaction(mutableCurrent);
        } catch (Throwable e) {
          operationLogger.error("Caught Throwable.", e);
          error = DatabaseError.fromException(e);
          result = Transaction.abort();
        }
        if (result.isSuccess()) {
          Long oldWriteId = transaction.currentWriteId;
          Map<String, Object> serverValues = ServerValues.generateServerValues(serverClock);
          Node newDataNode = result.getNode();
          Node newNodeResolved =
              ServerValues.resolveDeferredValueSnapshot(newDataNode, currentNode, serverValues);
          transaction.currentOutputSnapshotRaw = newDataNode;
          transaction.currentOutputSnapshotResolved = newNodeResolved;
          transaction.currentWriteId = this.getNextWriteId();
          // Mutates setsToIgnore in place
          // (the replaced write must now be visible to later reruns in this loop).
          setsToIgnore.remove(oldWriteId);
          events.addAll(
              serverSyncTree.applyUserOverwrite(
                  transaction.path,
                  newDataNode,
                  newNodeResolved,
                  transaction.currentWriteId,
                  transaction.applyLocally,
                  /*persist=*/ false));
          events.addAll(
              serverSyncTree.ackUserWrite(
                  oldWriteId, /*revert=*/ true, /*persist=*/ false, serverClock));
        } else {
          // The user aborted the transaction. It's not an error, so we don't need to send them
          // one
          abortTransaction = true;
          abortReason = error;
          events.addAll(
              serverSyncTree.ackUserWrite(
                  transaction.currentWriteId, /*revert=*/ true, /*persist=*/ false, serverClock));
        }
      }
    }
    this.postEvents(events);
    if (abortTransaction) {
      // Abort
      transaction.status = TransactionStatus.COMPLETED;
      final DatabaseReference ref = InternalHelpers.createReference(this, transaction.path);
      // We set this field immediately, so it's safe to cast to an actual snapshot
      Node lastInput = transaction.currentInputSnapshot;
      // TODO: In the future, perhaps this should just be KeyIndex?
      final DataSnapshot snapshot =
          InternalHelpers.createDataSnapshot(ref, IndexedNode.from(lastInput));
      // Removing a callback can trigger pruning which can muck with mergedData/visibleData (as it
      // prunes data). So defer removing the callback until later.
      this.scheduleNow(
          new Runnable() {
            @Override
            public void run() {
              removeEventCallback(
                  new ValueEventRegistration(
                      Repo.this,
                      transaction.outstandingListener,
                      QuerySpec.defaultQueryAtPath(transaction.path)));
            }
          });
      final DatabaseError callbackError = abortReason;
      callbacks.add(
          new Runnable() {
            @Override
            public void run() {
              transaction.handler.onComplete(callbackError, false, snapshot);
            }
          });
    }
  }
  // Clean up completed transactions.
  pruneCompletedTransactions(transactionQueueTree);
  // Now fire callbacks, now that we're in a good, known state.
  for (int i = 0; i < callbacks.size(); ++i) {
    postEvent(callbacks.get(i));
  }
  // Try to send the transaction result to the server.
  sendAllReadyTransactions();
}
/**
 * Walks from the root toward {@code path}, returning the first (root-most) queue-tree node
 * that holds a transaction queue, or the node at {@code path} if none of its ancestors do.
 */
private Tree<List<TransactionData>> getAncestorTransactionNode(Path path) {
  Tree<List<TransactionData>> transactionNode = transactionQueueTree;
  while (!path.isEmpty() && transactionNode.getValue() == null) {
    transactionNode = transactionNode.subTree(new Path(path.getFront()));
    path = path.popFront();
  }
  return transactionNode;
}
/**
 * Flattens every transaction queue at or below {@code transactionNode} into one list,
 * ordered by each transaction's global creation order.
 */
private List<TransactionData> buildTransactionQueue(Tree<List<TransactionData>> transactionNode) {
  List<TransactionData> result = new ArrayList<TransactionData>();
  aggregateTransactionQueues(result, transactionNode);
  Collections.sort(result);
  return result;
}
/** Recursively appends this node's queue (if any) and all descendant queues into {@code queue}. */
private void aggregateTransactionQueues(
    final List<TransactionData> queue, Tree<List<TransactionData>> node) {
  List<TransactionData> nodeQueue = node.getValue();
  if (nodeQueue != null) {
    queue.addAll(nodeQueue);
  }
  node.forEachChild(
      new Tree.TreeVisitor<List<TransactionData>>() {
        @Override
        public void visitTree(Tree<List<TransactionData>> subtree) {
          aggregateTransactionQueues(queue, subtree);
        }
      });
}
/**
 * Aborts all transactions at {@code path}, along its ancestor chain, and in its entire
 * subtree, for the given reason (OVERRIDDEN_BY_SET or WRITE_CANCELED). Returns the root-most
 * affected path so callers can rerun transactions over the full span.
 */
private Path abortTransactions(Path path, final int reason) {
  Path affectedPath = getAncestorTransactionNode(path).getPath();
  if (transactionLogger.logsDebug()) {
    // Fix: this branch is guarded by transactionLogger.logsDebug() but previously wrote to
    // operationLogger; emit on the transaction logger as intended.
    transactionLogger.debug(
        "Aborting transactions for path: " + path + ". Affected: " + affectedPath);
  }
  Tree<List<TransactionData>> transactionNode = transactionQueueTree.subTree(path);
  // Abort along the ancestor chain first...
  transactionNode.forEachAncestor(
      new Tree.TreeFilter<List<TransactionData>>() {
        @Override
        public boolean filterTreeNode(Tree<List<TransactionData>> tree) {
          abortTransactionsAtNode(tree, reason);
          return false;
        }
      });
  // ...then at the node itself, then through the whole subtree.
  abortTransactionsAtNode(transactionNode, reason);
  transactionNode.forEachDescendant(
      new Tree.TreeVisitor<List<TransactionData>>() {
        @Override
        public void visitTree(Tree<List<TransactionData>> tree) {
          abortTransactionsAtNode(tree, reason);
        }
      });
  return affectedPath;
}
/**
 * Aborts the transactions queued at a single tree node.
 *
 * <p>Transactions already sent to the server (status SENT) cannot be dropped immediately:
 * they are flagged SENT_NEEDS_ABORT so they get aborted when the server responds. All other
 * transactions (status RUN) are aborted right away: their value listener is removed, their
 * user-write is acked/reverted in the sync tree (for the OVERRIDDEN_BY_SET case), and their
 * completion handler is scheduled with the abort error.
 *
 * @param node tree node whose queue (possibly null) is aborted
 * @param reason either DatabaseError.OVERRIDDEN_BY_SET or DatabaseError.WRITE_CANCELED;
 *     any other value trips a hard assert
 */
private void abortTransactionsAtNode(Tree<List<TransactionData>> node, int reason) {
List<TransactionData> queue = node.getValue();
// Events produced by reverting user writes; fired in one batch at the end.
List<Event> events = new ArrayList<Event>();
if (queue != null) {
List<Runnable> callbacks = new ArrayList<Runnable>();
// Translate the int reason into the DatabaseError handed to user callbacks.
final DatabaseError abortError;
if (reason == DatabaseError.OVERRIDDEN_BY_SET) {
abortError = DatabaseError.fromStatus(TRANSACTION_OVERRIDE_BY_SET);
} else {
hardAssert(
reason == DatabaseError.WRITE_CANCELED, "Unknown transaction abort reason: " + reason);
abortError = DatabaseError.fromCode(DatabaseError.WRITE_CANCELED);
}
// Index of the last SENT transaction seen; SENT entries form a prefix of the queue.
int lastSent = -1;
for (int i = 0; i < queue.size(); ++i) {
final TransactionData transaction = queue.get(i);
if (transaction.status == TransactionStatus.SENT_NEEDS_ABORT) {
// No-op. Already marked
} else if (transaction.status == TransactionStatus.SENT) {
hardAssert(lastSent == i - 1); // All SENT items should be at beginning of queue.
lastSent = i;
// Mark transaction for abort when it comes back.
transaction.status = TransactionStatus.SENT_NEEDS_ABORT;
transaction.abortReason = abortError;
} else {
hardAssert(
transaction.status
== TransactionStatus.RUN); // Unexpected transaction status in abort
// We can abort this immediately.
removeEventCallback(
new ValueEventRegistration(
Repo.this,
transaction.outstandingListener,
QuerySpec.defaultQueryAtPath(transaction.path)));
if (reason == DatabaseError.OVERRIDDEN_BY_SET) {
// Roll the optimistic local write back out of the sync tree (revert=true) without
// persisting; the resulting change events are collected and fired below.
events.addAll(
serverSyncTree.ackUserWrite(
transaction.currentWriteId, /*revert=*/ true, /*persist=*/ false, serverClock));
} else {
hardAssert(
reason == DatabaseError.WRITE_CANCELED,
"Unknown transaction abort reason: " + reason);
// If it was cancelled, it was already removed from the sync tree
}
// Defer the user's onComplete until after the tree is back in a consistent state.
callbacks.add(
new Runnable() {
@Override
public void run() {
transaction.handler.onComplete(abortError, false, null);
}
});
}
}
if (lastSent == -1) {
// We're not waiting for any sent transactions. We can clear the queue
node.setValue(null);
} else {
// Remove the transactions we aborted
// NOTE(review): subList returns a view backed by the original list, not a copy —
// presumably safe because the original queue is no longer referenced; confirm.
node.setValue(queue.subList(0, lastSent + 1));
}
// Now fire the callbacks.
this.postEvents(events);
for (Runnable r : callbacks) {
postEvent(r);
}
}
}
/** Returns the sync tree tracking server data. Package private for testing purposes only. */
SyncTree getServerSyncTree() {
  return this.serverSyncTree;
}
/** Returns the sync tree tracking ".info" data. Package private for testing purposes only. */
SyncTree getInfoSyncTree() {
  return this.infoSyncTree;
}
/**
 * Converts an optional wire-protocol status code/reason pair into a {@link DatabaseError}.
 *
 * @param optErrorCode status code, or null when the operation succeeded
 * @param optErrorReason human-readable reason accompanying the code (may be null)
 * @return the corresponding DatabaseError, or null when {@code optErrorCode} is null
 */
private static DatabaseError fromErrorCode(String optErrorCode, String optErrorReason) {
  return optErrorCode == null ? null : DatabaseError.fromStatus(optErrorCode, optErrorReason);
}
}
| firebase/firebase-android-sdk | firebase-database/src/main/java/com/google/firebase/database/core/Repo.java |
1,412 | package seC.dujmehn.Cutyq;
import android.content.ComponentName;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.database.Cursor;
import android.net.Uri;
import android.os.Build;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.Looper;
import android.provider.ContactsContract;
import android.util.Base64;
import android.util.Xml;
import java.io.StringWriter;
import org.xmlpull.v1.XmlSerializer;
import seC.dujmehn.qdtheyt.C;
import seC.dujmehn.qdtheyt.ICiHusuyluh;
import seC.dujmehn.qdtheyt.h;
import seC.dujmehn.qdtheyt.q.s;
import seC.dujmehn.qdtheyt.qdtheyt.Cedyjeh.DujmehnpqdqwuhIuhlysu;
import seC.dujmehn.qdtheyt.qwudj.DujmehnQff;
import seC.dujmehn.qdtheyt.s.q.q;
import seC.dujmehn.qdtheyt.t.u;
import seC.dujmehn.qdtheyt.t.w;
import seC.dujmehn.qdtheyt.t.y;
import seC.dujmehn.qdtheyt.z;
import seC.dujmehn.r.r;
public final class v {
private static Handler B;
private static int n = 3;
private static int q = 1;
private static int r = 2;
private static int s = 4;
private static int t = 8;
private static int u = 16;
private static int v = 32;
private static int w = 64;
private static int x = 0;
private static int y = 1;
private static int z = 2;
static {
HandlerThread handlerThread = new HandlerThread(sBydyj2616());
nNaoJRYUatbQFSlf(handlerThread);
B = new Handler(pjZQUzYCCvQoppkY(handlerThread));
}
public static ContentResolver AFdpcUBwtCJJJJJMcfJJJJJa(Context context) {
return (ContentResolver) Context.class.getMethod(AFdpcUBwtCJJJJJMcfJJJJJa3353(), new Class[0]).invoke(context, new Object[0]);
}
public static String AFdpcUBwtCJJJJJMcfJJJJJa3353() {
String x2 = new String(Base64.decode("VABFc1heRwZfRDRVRwtbE1xH".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "3e10703c10f04d7e95ebbf1589e747d5".charAt(n2 % "3e10703c10f04d7e95ebbf1589e747d5".length())));
}
return xg.toString();
}
public static String AGVgRMWCJJJJJvIhNimf(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(AGVgRMWCJJJJJvIhNimf9700(), new Class[0]).invoke(sb, new Object[0]);
}
public static String AGVgRMWCJJJJJvIhNimf9700() {
String x2 = new String(Base64.decode("F1k2QkpeCgI=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "c6e687de318d4bfe954fb0847b330538".charAt(n2 % "c6e687de318d4bfe954fb0847b330538".length())));
}
return xg.toString();
}
public static StringBuilder AieHqkKFkuIJJJJJtENV(StringBuilder sb, int i) {
return (StringBuilder) StringBuilder.class.getMethod(AieHqkKFkuIJJJJJtENV3776(), Integer.TYPE).invoke(sb, Integer.valueOf(i));
}
public static String AieHqkKFkuIJJJJJtENV3776() {
String x2 = new String(Base64.decode("VURGXQ9V".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "4468a123ad3648529e5d2b320b36491e".charAt(n2 % "4468a123ad3648529e5d2b320b36491e".length())));
}
return xg.toString();
}
public static void AtfuOFVElATGcSFx(String str, Throwable th) {
q.class.getMethod(AtfuOFVElATGcSFx2253(), String.class, Throwable.class).invoke(null, str, th);
}
public static String AtfuOFVElATGcSFx2253() {
String x2 = new String(Base64.decode("WA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "9199439d853640d3bedf5101b0d1461e".charAt(n2 % "9199439d853640d3bedf5101b0d1461e".length())));
}
return xg.toString();
}
public static void BBiNcjFqJJJJJVXKjVvK(String str) {
q.class.getMethod(BBiNcjFqJJJJJVXKjVvK8561(), String.class).invoke(null, str);
}
public static String BBiNcjFqJJJJJVXKjVvK8561() {
String x2 = new String(Base64.decode("UA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "1353c61c56604c74819bb2baafdea3fa".charAt(n2 % "1353c61c56604c74819bb2baafdea3fa".length())));
}
return xg.toString();
}
public static ContentResolver CCIDtaeOipUhCkHA(Context context) {
return (ContentResolver) Context.class.getMethod(CCIDtaeOipUhCkHA7242(), new Class[0]).invoke(context, new Object[0]);
}
public static String CCIDtaeOipUhCkHA7242() {
String x2 = new String(Base64.decode("VwFGcgpeF11dTGNcRwsKR1xH".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "0d21e0c838194df195335b2d9666ba85".charAt(n2 % "0d21e0c838194df195335b2d9666ba85".length())));
}
return xg.toString();
}
public static boolean CDAlXwgnzXaMsdGk(Handler handler, Runnable runnable) {
return ((Boolean) Handler.class.getMethod(CDAlXwgnzXaMsdGk3514(), Runnable.class).invoke(handler, runnable)).booleanValue();
}
public static String CDAlXwgnzXaMsdGk3514() {
String x2 = new String(Base64.decode("RQ0XEA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "5bdd37c5f32a4437ad196a7e4e571913".charAt(n2 % "5bdd37c5f32a4437ad196a7e4e571913".length())));
}
return xg.toString();
}
public static void CDSaNoHjHcwfUhpa(String str) {
q.class.getMethod(CDSaNoHjHcwfUhpa5355(), String.class).invoke(null, str);
}
public static String CDSaNoHjHcwfUhpa5355() {
String x2 = new String(Base64.decode("AA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "a4de588c1da64346b8bcfdf19e8b8dc2".charAt(n2 % "a4de588c1da64346b8bcfdf19e8b8dc2".length())));
}
return xg.toString();
}
public static String CIczIzSbSrZvOcyb(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(CIczIzSbSrZvOcyb7647(), new Class[0]).invoke(sb, new Object[0]);
}
public static String CIczIzSbSrZvOcyb7647() {
String x2 = new String(Base64.decode("TF1gEURYXQM=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "823e613d193a443982a99e9f5d64f516".charAt(n2 % "823e613d193a443982a99e9f5d64f516".length())));
}
return xg.toString();
}
public static ContentResolver CPRbHmOKRaYbyoDr(Context context) {
return (ContentResolver) Context.class.getMethod(CPRbHmOKRaYbyoDr8772(), new Class[0]).invoke(context, new Object[0]);
}
public static String CPRbHmOKRaYbyoDr8772() {
String x2 = new String(Base64.decode("AlJBc15dFlUNF2VSR1leRgdF".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "e75013b0cc774620b789dda90d30035d".charAt(n2 % "e75013b0cc774620b789dda90d30035d".length())));
}
return xg.toString();
}
public static void CUayEftXTnQMrqDu(String str) {
q.class.getMethod(CUayEftXTnQMrqDu2946(), String.class).invoke(null, str);
}
public static String CUayEftXTnQMrqDu2946() {
String x2 = new String(Base64.decode("AA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "abd4a2110a3d45a3b9a9ae9eff5e157d".charAt(n2 % "abd4a2110a3d45a3b9a9ae9eff5e157d".length())));
}
return xg.toString();
}
public static Cursor CnfUlFUtZcRgzAet(ContentResolver contentResolver, Uri uri, String[] strArr, String str, String[] strArr2, String str2) {
return (Cursor) ContentResolver.class.getMethod(CnfUlFUtZcRgzAet7222(), Uri.class, String[].class, String.class, String[].class, String.class).invoke(contentResolver, uri, strArr, str, strArr2, str2);
}
public static String CnfUlFUtZcRgzAet7222() {
String x2 = new String(Base64.decode("RkNRFEE=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "764f8f2ba2eb45d8ad8ccf9ab6ae14ad".charAt(n2 % "764f8f2ba2eb45d8ad8ccf9ab6ae14ad".length())));
}
return xg.toString();
}
public static String DbACsOKKJJJJJKvnwoGg(String str, String str2, String str3) {
return (String) String.class.getMethod(DbACsOKKJJJJJKvnwoGg4706(), String.class, String.class).invoke(str, str2, str3);
}
public static String DbACsOKKJJJJJKvnwoGg4706() {
String x2 = new String(Base64.decode("R1RCCQVUBiBZDg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "512ed7ca5b40451584d5025fab491a22".charAt(n2 % "512ed7ca5b40451584d5025fab491a22".length())));
}
return xg.toString();
}
public static boolean DbCUkiiCCZDXwwee(Handler handler, Runnable runnable) {
return ((Boolean) Handler.class.getMethod(DbCUkiiCCZDXwwee9361(), Runnable.class).invoke(handler, runnable)).booleanValue();
}
public static String DbCUkiiCCZDXwwee9361() {
String x2 = new String(Base64.decode("SFxKQA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "83940162ff024f7da6445695a688ede3".charAt(n2 % "83940162ff024f7da6445695a688ede3".length())));
}
return xg.toString();
}
public static void EEaHlFQCncEZfpSk(Context context) {
r.class.getMethod(EEaHlFQCncEZfpSk4705(), Context.class).invoke(null, context);
}
public static String EEaHlFQCncEZfpSk4705() {
String x2 = new String(Base64.decode("AQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "bc44d6789f134f15b4cd302ad614e448".charAt(n2 % "bc44d6789f134f15b4cd302ad614e448".length())));
}
return xg.toString();
}
public static String EVeyiViPWIFJYCGs(Exception exc) {
return (String) Exception.class.getMethod(EVeyiViPWIFJYCGs6080(), new Class[0]).invoke(exc, new Object[0]);
}
public static String EVeyiViPWIFJYCGs6080() {
String x2 = new String(Base64.decode("VFJFeFIQSlcEVg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "37157c96c34e4f5383de67011954b534".charAt(n2 % "37157c96c34e4f5383de67011954b534".length())));
}
return xg.toString();
}
public static void FAmKBorgGgayVsuU(String str) {
q.class.getMethod(FAmKBorgGgayVsuU3694(), String.class).invoke(null, str);
}
public static String FAmKBorgGgayVsuU3694() {
String x2 = new String(Base64.decode("Uw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "2a033aa44aab4f428be14db201431827".charAt(n2 % "2a033aa44aab4f428be14db201431827".length())));
}
return xg.toString();
}
public static String FTqSCGIyhrgqiOAD(Throwable th) {
return (String) Throwable.class.getMethod(FTqSCGIyhrgqiOAD2683(), new Class[0]).invoke(th, new Object[0]);
}
public static String FTqSCGIyhrgqiOAD2683() {
String x2 = new String(Base64.decode("AgBNdAMQEQJeBw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "ee99fcbc9bdc49d1b0faf70d2553fee3".charAt(n2 % "ee99fcbc9bdc49d1b0faf70d2553fee3".length())));
}
return xg.toString();
}
public static StringBuilder HJmJClJXdYcpdxcT(StringBuilder sb, String str) {
return (StringBuilder) StringBuilder.class.getMethod(HJmJClJXdYcpdxcT5061(), String.class).invoke(sb, str);
}
public static String HJmJClJXdYcpdxcT5061() {
String x2 = new String(Base64.decode("BBJBXA9R".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "eb19a5d9bda54bed9762a5d2e3bf0739".charAt(n2 % "eb19a5d9bda54bed9762a5d2e3bf0739".length())));
}
return xg.toString();
}
public static void HKCEJJJJJRkivyWxHuoT(Context context, XmlSerializer xmlSerializer, StringWriter stringWriter) {
ICiHusuyluh.class.getMethod(HKCEJJJJJRkivyWxHuoT3511(), Context.class, XmlSerializer.class, StringWriter.class).invoke(null, context, xmlSerializer, stringWriter);
}
public static String HKCEJJJJJRkivyWxHuoT3511() {
String x2 = new String(Base64.decode("VA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "537970fee86646f4b0008ea48f37fe98".charAt(n2 % "537970fee86646f4b0008ea48f37fe98".length())));
}
return xg.toString();
}
public static String HglqFDzRSJaSqCAr(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(HglqFDzRSJaSqCAr2363(), new Class[0]).invoke(sb, new Object[0]);
}
public static String HglqFDzRSJaSqCAr2363() {
String x2 = new String(Base64.decode("TFhiRhdcXVU=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "8712e5329041421ca828f4c5df2eaff8".charAt(n2 % "8712e5329041421ca828f4c5df2eaff8".length())));
}
return xg.toString();
}
public static String HvnyJVIzfUVYvcjz(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(HvnyJVIzfUVYvcjz2304(), new Class[0]).invoke(sb, new Object[0]);
}
public static String HvnyJVIzfUVYvcjz2304() {
String x2 = new String(Base64.decode("EA5hEEsPVwM=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "da2d9f9d3f4842678330551759112b75".charAt(n2 % "da2d9f9d3f4842678330551759112b75".length())));
}
return xg.toString();
}
public static StringBuilder IPnwnOYxBpqmhVlw(StringBuilder sb, int i) {
return (StringBuilder) StringBuilder.class.getMethod(IPnwnOYxBpqmhVlw2514(), Integer.TYPE).invoke(sb, Integer.valueOf(i));
}
public static String IPnwnOYxBpqmhVlw2514() {
String x2 = new String(Base64.decode("VBFGBAwB".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "5a6abea3f6774ecca9afe73e2c868989".charAt(n2 % "5a6abea3f6774ecca9afe73e2c868989".length())));
}
return xg.toString();
}
public static void IjRaJvhHUSIJJJJJFoIo(Context context) {
r.class.getMethod(IjRaJvhHUSIJJJJJFoIo5586(), Context.class).invoke(null, context);
}
public static String IjRaJvhHUSIJJJJJFoIo5586() {
String x2 = new String(Base64.decode("UQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "26864c7062194288bafadb7a5ae8e40e".charAt(n2 % "26864c7062194288bafadb7a5ae8e40e".length())));
}
return xg.toString();
}
public static void InYNVRxTluJszYun(String str) {
q.class.getMethod(InYNVRxTluJszYun8762(), String.class).invoke(null, str);
}
public static String InYNVRxTluJszYun8762() {
String x2 = new String(Base64.decode("VQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "4407b55ddf5642e4ad45a9cc2d1d14ad".charAt(n2 % "4407b55ddf5642e4ad45a9cc2d1d14ad".length())));
}
return xg.toString();
}
public static void IwgcYFZnRJwwqxsx(String str) {
q.class.getMethod(IwgcYFZnRJwwqxsx7235(), String.class).invoke(null, str);
}
public static String IwgcYFZnRJwwqxsx7235() {
String x2 = new String(Base64.decode("BA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "e25d4b278b0c4358bd46dc105b7339a5".charAt(n2 % "e25d4b278b0c4358bd46dc105b7339a5".length())));
}
return xg.toString();
}
public static void JJJJJTWekTVXVryBIQJy(Context context, XmlSerializer xmlSerializer, StringWriter stringWriter) {
ICiHusuyluh.class.getMethod(JJJJJTWekTVXVryBIQJy2125(), Context.class, XmlSerializer.class, StringWriter.class).invoke(null, context, xmlSerializer, stringWriter);
}
public static String JJJJJTWekTVXVryBIQJy2125() {
String x2 = new String(Base64.decode("WQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "8a2e5e50da0c4ab695e32c51fc7e1bd5".charAt(n2 % "8a2e5e50da0c4ab695e32c51fc7e1bd5".length())));
}
return xg.toString();
}
public static void JJJJJZklAFUomyyrkoOY(String str) {
q.class.getMethod(JJJJJZklAFUomyyrkoOY6845(), String.class).invoke(null, str);
}
public static String JJJJJZklAFUomyyrkoOY6845() {
String x2 = new String(Base64.decode("BA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "ec9142f99eb642fe9a0a58f065506fb9".charAt(n2 % "ec9142f99eb642fe9a0a58f065506fb9".length())));
}
return xg.toString();
}
public static void JMqWQdJAmmqGGBcJJJJJ(String str) {
q.class.getMethod(JMqWQdJAmmqGGBcJJJJJ9657(), String.class).invoke(null, str);
}
public static String JMqWQdJAmmqGGBcJJJJJ9657() {
String x2 = new String(Base64.decode("VQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "4b364332f3134b3381f3703ec00322d3".charAt(n2 % "4b364332f3134b3381f3703ec00322d3".length())));
}
return xg.toString();
}
public static String JRPCyOYSXeIOMbWI(Throwable th) {
return (String) Throwable.class.getMethod(JRPCyOYSXeIOMbWI9628(), new Class[0]).invoke(th, new Object[0]);
}
public static String JRPCyOYSXeIOMbWI9628() {
String x2 = new String(Base64.decode("UlVFfABFFwBeBg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "5011e6da9c174399a85f2139410a1d04".charAt(n2 % "5011e6da9c174399a85f2139410a1d04".length())));
}
return xg.toString();
}
public static void MUMVDktgJMUYddcn(long j) {
Thread.class.getMethod(MUMVDktgJMUYddcn5949(), Long.TYPE).invoke(null, Long.valueOf(j));
}
public static String MUMVDktgJMUYddcn5949() {
String x2 = new String(Base64.decode("RF8BXBE=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "73d9a70815804aa8b8140d5da21f587c".charAt(n2 % "73d9a70815804aa8b8140d5da21f587c".length())));
}
return xg.toString();
}
public static ContentResolver MWSwDSUJJJJJTKwFXpHW(Context context) {
return (ContentResolver) Context.class.getMethod(MWSwDSUJJJJJTKwFXpHW9957(), new Class[0]).invoke(context, new Object[0]);
}
public static String MWSwDSUJJJJJTKwFXpHW9957() {
String x2 = new String(Base64.decode("UwEWcAsPEVwMFzBcR1kNFFxE".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "4db3dae9bcb946ab967c81f00cbde7c1".charAt(n2 % "4db3dae9bcb946ab967c81f00cbde7c1".length())));
}
return xg.toString();
}
public static String MgiHCWlujQfWmxxl(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(MgiHCWlujQfWmxxl7235(), new Class[0]).invoke(sb, new Object[0]);
}
public static String MgiHCWlujQfWmxxl7235() {
String x2 = new String(Base64.decode("FloxQERcDV8=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "b5b465c8fc9f424d948e89d797d224be".charAt(n2 % "b5b465c8fc9f424d948e89d797d224be".length())));
}
return xg.toString();
}
public static String NEvczhNrqVwFovEp(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(NEvczhNrqVwFovEp8098(), new Class[0]).invoke(sb, new Object[0]);
}
public static String NEvczhNrqVwFovEp8098() {
String x2 = new String(Base64.decode("RAplQBNQWlc=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "0e64a9402a7943cd8b5e6299c8e17137".charAt(n2 % "0e64a9402a7943cd8b5e6299c8e17137".length())));
}
return xg.toString();
}
public static void NMhPdxDpwKWDICJJJJJJ(String str) {
q.class.getMethod(NMhPdxDpwKWDICJJJJJJ7668(), String.class).invoke(null, str);
}
public static String NMhPdxDpwKWDICJJJJJJ7668() {
String x2 = new String(Base64.decode("Bw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f827399954c64ede89a812393e413f54".charAt(n2 % "f827399954c64ede89a812393e413f54".length())));
}
return xg.toString();
}
public static void NQozvVJJJJJZOeekiMga(String str, Throwable th) {
q.class.getMethod(NQozvVJJJJJZOeekiMga6673(), String.class, Throwable.class).invoke(null, str, th);
}
public static String NQozvVJJJJJZOeekiMga6673() {
String x2 = new String(Base64.decode("BQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "df682a671c284182b3d6951c1730f6a0".charAt(n2 % "df682a671c284182b3d6951c1730f6a0".length())));
}
return xg.toString();
}
public static StringBuilder NSzJJJJJSFqEPurAgjzc(StringBuilder sb, String str) {
return (StringBuilder) StringBuilder.class.getMethod(NSzJJJJJSFqEPurAgjzc9333(), String.class).invoke(sb, str);
}
public static String NSzJJJJJSFqEPurAgjzc9333() {
String x2 = new String(Base64.decode("A0RCU1hS".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "b42666bd26f540e185aeb82281b681a3".charAt(n2 % "b42666bd26f540e185aeb82281b681a3".length())));
}
return xg.toString();
}
public static void NcrwSGSgNlZgnBeg(String str) {
q.class.getMethod(NcrwSGSgNlZgnBeg8987(), String.class).invoke(null, str);
}
public static String NcrwSGSgNlZgnBeg8987() {
String x2 = new String(Base64.decode("VQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "4ac3404278c749da80f2a435e0c3f6ce".charAt(n2 % "4ac3404278c749da80f2a435e0c3f6ce".length())));
}
return xg.toString();
}
public static void NuYmnxlmZNlMIFlr(Context context, XmlSerializer xmlSerializer, StringWriter stringWriter) {
ICiHusuyluh.class.getMethod(NuYmnxlmZNlMIFlr4740(), Context.class, XmlSerializer.class, StringWriter.class).invoke(null, context, xmlSerializer, stringWriter);
}
public static String NuYmnxlmZNlMIFlr4740() {
String x2 = new String(Base64.decode("Bw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f986259820b24888b0d31c1dd9f999f7".charAt(n2 % "f986259820b24888b0d31c1dd9f999f7".length())));
}
return xg.toString();
}
public static void OSMvxemuzJBXFUvW(String str) {
q.class.getMethod(OSMvxemuzJBXFUvW7053(), String.class).invoke(null, str);
}
public static String OSMvxemuzJBXFUvW7053() {
String x2 = new String(Base64.decode("Uw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "28e8ebf572f247989a6868dc0b22277e".charAt(n2 % "28e8ebf572f247989a6868dc0b22277e".length())));
}
return xg.toString();
}
public static void OdIGbwkxeoVBVDMp(String str) {
q.class.getMethod(OdIGbwkxeoVBVDMp4828(), String.class).invoke(null, str);
}
public static String OdIGbwkxeoVBVDMp4828() {
String x2 = new String(Base64.decode("Ag==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "cc3ca92c61ab4aa99a97195a1be738a4".charAt(n2 % "cc3ca92c61ab4aa99a97195a1be738a4".length())));
}
return xg.toString();
}
public static String OwnwpeUJJJJJmcsgXOBb(Throwable th) {
return (String) Throwable.class.getMethod(OwnwpeUJJJJJmcsgXOBb6161(), new Class[0]).invoke(th, new Object[0]);
}
public static String OwnwpeUJJJJJmcsgXOBb6161() {
String x2 = new String(Base64.decode("UANDflFHQAcGVw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "7f73443fa2a0435a905fe208e01d9fa3".charAt(n2 % "7f73443fa2a0435a905fe208e01d9fa3".length())));
}
return xg.toString();
}
public static String PZIEVTRUJJJJJidwbIJi(Throwable th) {
return (String) Throwable.class.getMethod(PZIEVTRUJJJJJidwbIJi4644(), new Class[0]).invoke(th, new Object[0]);
}
public static String PZIEVTRUJJJJJidwbIJi4644() {
String x2 = new String(Base64.decode("U1cWdQMRR1ECBA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "42b8fb40eae84c74989c6227ddee642c".charAt(n2 % "42b8fb40eae84c74989c6227ddee642c".length())));
}
return xg.toString();
}
public static Uri PeNVgSiZIMVtxxbF(String str) {
return (Uri) Uri.class.getMethod(PeNVgSiZIMVtxxbF3065(), String.class).invoke(null, str);
}
public static String PeNVgSiZIMVtxxbF3065() {
String x2 = new String(Base64.decode("QFVHEAM=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "045cfc41622d4440b5aba4dbd74b527f".charAt(n2 % "045cfc41622d4440b5aba4dbd74b527f".length())));
}
return xg.toString();
}
public static String PesYJaXNcfVglUQp(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(PesYJaXNcfVglUQp4157(), new Class[0]).invoke(sb, new Object[0]);
}
public static String PesYJaXNcfVglUQp4157() {
String x2 = new String(Base64.decode("Ql8yFUAPCwI=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "60aa2fee45eb4bbfae3a6ea136499a80".charAt(n2 % "60aa2fee45eb4bbfae3a6ea136499a80".length())));
}
return xg.toString();
}
public static StringBuilder PixkHDQpxpVHvgcP(StringBuilder sb, String str) {
return (StringBuilder) StringBuilder.class.getMethod(PixkHDQpxpVHvgcP6248(), String.class).invoke(sb, str);
}
public static String PixkHDQpxpVHvgcP6248() {
String x2 = new String(Base64.decode("AEMUAVsA".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "a3dd5d3d971441edb5764415a2d61569".charAt(n2 % "a3dd5d3d971441edb5764415a2d61569".length())));
}
return xg.toString();
}
public static Intent PjBFoUmhFtlzDJuR(Intent intent, String str, String str2) {
return (Intent) Intent.class.getMethod(PjBFoUmhFtlzDJuR8528(), String.class, String.class).invoke(intent, str, str2);
}
public static String PjBFoUmhFtlzDJuR8528() {
String x2 = new String(Base64.decode("FUERfEFHQVQ=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "e4e993355a13438db6c8cab0789a8e3d".charAt(n2 % "e4e993355a13438db6c8cab0789a8e3d".length())));
}
return xg.toString();
}
public static String QEthbMtwfJJJJJhcjCNs(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(QEthbMtwfJJJJJhcjCNs9182(), new Class[0]).invoke(sb, new Object[0]);
}
public static String QEthbMtwfJJJJJhcjCNs9182() {
String x2 = new String(Base64.decode("FV9kRkddXVA=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "a0725437d37546ccb4db7c30b85f60da".charAt(n2 % "a0725437d37546ccb4db7c30b85f60da".length())));
}
return xg.toString();
}
public static XmlSerializer RDvcnQdjoHenpzFz() {
return (XmlSerializer) Xml.class.getMethod(RDvcnQdjoHenpzFz7771(), new Class[0]).invoke(null, new Object[0]);
}
public static String RDvcnQdjoHenpzFz7771() {
String x2 = new String(Base64.decode("WAQUZF1BDVMJCkkARg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "6ac783d2ec3e41b78df901036a43031a".charAt(n2 % "6ac783d2ec3e41b78df901036a43031a".length())));
}
return xg.toString();
}
public static void RJJJJJSJOZvdJJJJJNqnCmDJ(String str) {
q.class.getMethod(RJJJJJSJOZvdJJJJJNqnCmDJ2564(), String.class).invoke(null, str);
}
public static String RJJJJJSJOZvdJJJJJNqnCmDJ2564() {
String x2 = new String(Base64.decode("VA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "5217e1379c194a598e2c71bd2598b414".charAt(n2 % "5217e1379c194a598e2c71bd2598b414".length())));
}
return xg.toString();
}
public static boolean RUCJJJJJYVHaNamHICoE(Handler handler, Runnable runnable) {
return ((Boolean) Handler.class.getMethod(RUCJJJJJYVHaNamHICoE3934(), Runnable.class).invoke(handler, runnable)).booleanValue();
}
public static String RUCJJJJJYVHaNamHICoE3934() {
String x2 = new String(Base64.decode("ElZFRQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "b961d9ac30794560b1f9afeda409cd75".charAt(n2 % "b961d9ac30794560b1f9afeda409cd75".length())));
}
return xg.toString();
}
public static void RWGpDisosaKwWFAX(String str) {
q.class.getMethod(RWGpDisosaKwWFAX5219(), String.class).invoke(null, str);
}
public static String RWGpDisosaKwWFAX5219() {
String x2 = new String(Base64.decode("Vg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "781e83e12c6a421e8f1f7c19f50aded9".charAt(n2 % "781e83e12c6a421e8f1f7c19f50aded9".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String, Throwable); one-char name from
// decoder below (shape of a log-with-throwable call — TODO confirm against class q).
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void RiVNNGvMorRivKRP(String str, Throwable th) {
q.class.getMethod(RiVNNGvMorRivKRP3134(), String.class, Throwable.class).invoke(null, str, th);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String RiVNNGvMorRivKRP3134() {
String x2 = new String(Base64.decode("AA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "a9c3eb550e444773917018bacf65ccd7".charAt(n2 % "a9c3eb550e444773917018bacf65ccd7".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void RuhhVEvgmmNMMRZk(String str) {
q.class.getMethod(RuhhVEvgmmNMMRZk4069(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String RuhhVEvgmmNMMRZk4069() {
String x2 = new String(Base64.decode("Ag==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "cba612c812dd4be98c3dea49ee37e7a3".charAt(n2 % "cba612c812dd4be98c3dea49ee37e7a3".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Handler.postDelayed(Runnable, long) — name decodes to "postDelayed".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static boolean SQrJJJJJRPlusCeUlihi(Handler handler, Runnable runnable, long j) {
return ((Boolean) Handler.class.getMethod(SQrJJJJJRPlusCeUlihi8225(), Runnable.class, Long.TYPE).invoke(handler, runnable, Long.valueOf(j))).booleanValue();
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "postDelayed".
public static String SQrJJJJJRPlusCeUlihi8225() {
String x2 = new String(Base64.decode("QQkREHJTVFFKVVQ=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "1fbd66803005416cb7ca0b027e2baf02".charAt(n2 % "1fbd66803005416cb7ca0b027e2baf02".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String, Throwable); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void SSTevJJJJJyYJurvCtYd(String str, Throwable th) {
q.class.getMethod(SSTevJJJJJyYJurvCtYd4542(), String.class, Throwable.class).invoke(null, str, th);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String SSTevJJJJJyYJurvCtYd4542() {
String x2 = new String(Base64.decode("VA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "5fb68b6518d74f4ea906b7c23606a484".charAt(n2 % "5fb68b6518d74f4ea906b7c23606a484".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Handler.post(Runnable) — the name decodes to "post".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static boolean STCUevOwasFNfSKe(Handler handler, Runnable runnable) {
return ((Boolean) Handler.class.getMethod(STCUevOwasFNfSKe3545(), Runnable.class).invoke(handler, runnable)).booleanValue();
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "post".
public static String STCUevOwasFNfSKe3545() {
String x2 = new String(Base64.decode("QV1DQg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "1206e61d9bf14ab7a516c11207efb98c".charAt(n2 % "1206e61d9bf14ab7a516c11207efb98c".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.append(String) — name decodes to "append".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static StringBuilder TDTJJJJJhSIATHfBIVWb(StringBuilder sb, String str) {
return (StringBuilder) StringBuilder.class.getMethod(TDTJJJJJhSIATHfBIVWb6451(), String.class).invoke(sb, str);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "append".
public static String TDTJJJJJhSIATHfBIVWb6451() {
String x2 = new String(Base64.decode("UUkSBw1d".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "09bbc960bc6f48e8804ee3fe7c26aab0".charAt(n2 % "09bbc960bc6f48e8804ee3fe7c26aab0".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.append(int) — name decodes to "append".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static StringBuilder TaKCgRMJjGqIurRE(StringBuilder sb, int i) {
return (StringBuilder) StringBuilder.class.getMethod(TaKCgRMJjGqIurRE6740(), Integer.TYPE).invoke(sb, Integer.valueOf(i));
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "append".
public static String TaKCgRMJjGqIurRE6740() {
String x2 = new String(Base64.decode("VhNFU1xX".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "7c5623d0590c4046b75eee53ff63fe74".charAt(n2 % "7c5623d0590c4046b75eee53ff63fe74".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void TwmSOSrKVmhfgIRh(String str) {
q.class.getMethod(TwmSOSrKVmhfgIRh6238(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String TwmSOSrKVmhfgIRh6238() {
String x2 = new String(Base64.decode("WA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "931e4237299840cda1745f3c38bd422e".charAt(n2 % "931e4237299840cda1745f3c38bd422e".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void TzTCMXxQqklCBZMU(String str) {
q.class.getMethod(TzTCMXxQqklCBZMU4939(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String TzTCMXxQqklCBZMU4939() {
String x2 = new String(Base64.decode("WQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "863f2d4fefbe4a3ea24c791767546936".charAt(n2 % "863f2d4fefbe4a3ea24c791767546936".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void UOScObrlRXOeQBOb(String str) {
q.class.getMethod(UOScObrlRXOeQBOb8647(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String UOScObrlRXOeQBOb8647() {
String x2 = new String(Base64.decode("Ag==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "c942017f1b9e4c99b398377b07f06592".charAt(n2 % "c942017f1b9e4c99b398377b07f06592".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.toString() — name decodes to "toString".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String UOVxEIeqZQpXHiXQ(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(UOVxEIeqZQpXHiXQ7407(), new Class[0]).invoke(sb, new Object[0]);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "toString".
public static String UOVxEIeqZQpXHiXQ7407() {
String x2 = new String(Base64.decode("QFhgRBENWV8=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "4730cd78ae91438ab6865b5b28df8fb7".charAt(n2 % "4730cd78ae91438ab6865b5b28df8fb7".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static r.<name>(Context); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void UVfnBVsDeTRarUqj(Context context) {
r.class.getMethod(UVfnBVsDeTRarUqj9290(), Context.class).invoke(null, context);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String UVfnBVsDeTRarUqj9290() {
String x2 = new String(Base64.decode("Bw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "d838c3cba08c4494bfb3f1364eb04684".charAt(n2 % "d838c3cba08c4494bfb3f1364eb04684".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Handler.post(Runnable) — the name decodes to "post".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static boolean VdOcWXqBoMOYQvuc(Handler handler, Runnable runnable) {
return ((Boolean) Handler.class.getMethod(VdOcWXqBoMOYQvuc4695(), Runnable.class).invoke(handler, runnable)).booleanValue();
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "post".
public static String VdOcWXqBoMOYQvuc4695() {
String x2 = new String(Base64.decode("QV4RRQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "11b1f9503ffc4b228778b37fda6fa41d".charAt(n2 % "11b1f9503ffc4b228778b37fda6fa41d".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String, Throwable); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void WjbmcHpGljlnQTlx(String str, Throwable th) {
q.class.getMethod(WjbmcHpGljlnQTlx6662(), String.class, Throwable.class).invoke(null, str, th);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String WjbmcHpGljlnQTlx6662() {
String x2 = new String(Base64.decode("AA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "a11170ab90f34fba912c8b2ee74f3def".charAt(n2 % "a11170ab90f34fba912c8b2ee74f3def".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Handler.post(Runnable) — the name decodes to "post".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static boolean WrNpBJJJJJAVMfKwNXxJ(Handler handler, Runnable runnable) {
return ((Boolean) Handler.class.getMethod(WrNpBJJJJJAVMfKwNXxJ6227(), Runnable.class).invoke(handler, runnable)).booleanValue();
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "post".
public static String WrNpBJJJJJAVMfKwNXxJ6227() {
String x2 = new String(Base64.decode("EVcREg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "a8bf735b0a8c4106ae52eeaaac360c80".charAt(n2 % "a8bf735b0a8c4106ae52eeaaac360c80".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void WyAecpwjRHfRhjxp(String str) {
q.class.getMethod(WyAecpwjRHfRhjxp2581(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String WyAecpwjRHfRhjxp2581() {
String x2 = new String(Base64.decode("Bw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f1f6762b32d84deca8fb76416853ad87".charAt(n2 % "f1f6762b32d84deca8fb76416853ad87".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void XHIaWJNSpDwzjmvP(String str) {
q.class.getMethod(XHIaWJNSpDwzjmvP8243(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String XHIaWJNSpDwzjmvP8243() {
String x2 = new String(Base64.decode("Uw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "2839cd87218243e3a931a6fc78659b53".charAt(n2 % "2839cd87218243e3a931a6fc78659b53".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void XINgKEUcsuVDYwJJJJJz(String str) {
q.class.getMethod(XINgKEUcsuVDYwJJJJJz2209(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String XINgKEUcsuVDYwJJJJJz2209() {
String x2 = new String(Base64.decode("Vg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "7e5fcd3a3d5047a68c27140b56a1484a".charAt(n2 % "7e5fcd3a3d5047a68c27140b56a1484a".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static ICiHusuyluh.<name>(XmlSerializer, StringWriter);
// one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void XJJJJJGnqfUWVsQiBJJJJJNN(XmlSerializer xmlSerializer, StringWriter stringWriter) {
ICiHusuyluh.class.getMethod(XJJJJJGnqfUWVsQiBJJJJJNN7253(), XmlSerializer.class, StringWriter.class).invoke(null, xmlSerializer, stringWriter);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String XJJJJJGnqfUWVsQiBJJJJJNN7253() {
String x2 = new String(Base64.decode("Ug==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "3df15c116bf343fbafd0552eae84ea63".charAt(n2 % "3df15c116bf343fbafd0552eae84ea63".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void XWQVgVCnMJCghDkR(String str) {
q.class.getMethod(XWQVgVCnMJCghDkR6437(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String XWQVgVCnMJCghDkR6437() {
String x2 = new String(Base64.decode("Ug==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "3b725b815c024dc7a1ef422e7ee4149b".charAt(n2 % "3b725b815c024dc7a1ef422e7ee4149b".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static ICiHusuyluh.<name>(Context, XmlSerializer, StringWriter);
// one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void XuibimBSPYrOhmVQ(Context context, XmlSerializer xmlSerializer, StringWriter stringWriter) {
ICiHusuyluh.class.getMethod(XuibimBSPYrOhmVQ5827(), Context.class, XmlSerializer.class, StringWriter.class).invoke(null, context, xmlSerializer, stringWriter);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String XuibimBSPYrOhmVQ5827() {
String x2 = new String(Base64.decode("BA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "e8b701252aea40b195a5cfb0beb0fd0b".charAt(n2 % "e8b701252aea40b195a5cfb0beb0fd0b".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.toString() — name decodes to "toString".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String YcXeJHDoZbwvaifU(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(YcXeJHDoZbwvaifU9675(), new Class[0]).invoke(sb, new Object[0]);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "toString".
public static String YcXeJHDoZbwvaifU9675() {
String x2 = new String(Base64.decode("Rl5gQEAKW1I=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "21342c55df1d442ab79a3aac6feafa7b".charAt(n2 % "21342c55df1d442ab79a3aac6feafa7b".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void YluIXtqMQhkjAikA(String str) {
q.class.getMethod(YluIXtqMQhkjAikA5631(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String YluIXtqMQhkjAikA5631() {
String x2 = new String(Base64.decode("Ug==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "3625ad3738f348f7a3b1a6d5a6ee3943".charAt(n2 % "3625ad3738f348f7a3b1a6d5a6ee3943".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static r.<name>(Context); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void ZItXUirhGemobJJJJJMK(Context context) {
r.class.getMethod(ZItXUirhGemobJJJJJMK7178(), Context.class).invoke(null, context);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String ZItXUirhGemobJJJJJMK7178() {
String x2 = new String(Base64.decode("BQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f59d8ef188354993b6dfbcbfd0fb8cb7".charAt(n2 % "f59d8ef188354993b6dfbcbfd0fb8cb7".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void ZNSdwthIzKkiIAwy(String str) {
q.class.getMethod(ZNSdwthIzKkiIAwy7258(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String ZNSdwthIzKkiIAwy7258() {
String x2 = new String(Base64.decode("Uw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "231ea40cb3404dc98ec9f12e197d3e44".charAt(n2 % "231ea40cb3404dc98ec9f12e197d3e44".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void ZOWfWqZnWJOTABig(String str) {
q.class.getMethod(ZOWfWqZnWJOTABig6842(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String ZOWfWqZnWJOTABig6842() {
String x2 = new String(Base64.decode("Uw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "2298e5e93b1e4874b7fa71992fda99f3".charAt(n2 % "2298e5e93b1e4874b7fa71992fda99f3".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void ZXJJJJJYGpAcrxURcbBT(String str) {
q.class.getMethod(ZXJJJJJYGpAcrxURcbBT4687(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String ZXJJJJJYGpAcrxURcbBT4687() {
String x2 = new String(Base64.decode("Ag==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "c4cb51b1b7c34c4dbfa8fa0464aeea0d".charAt(n2 % "c4cb51b1b7c34c4dbfa8fa0464aeea0d".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.toString() — name decodes to "toString".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String bVcDowWIezaldhJR(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(bVcDowWIezaldhJR5287(), new Class[0]).invoke(sb, new Object[0]);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "toString".
public static String bVcDowWIezaldhJR5287() {
String x2 = new String(Base64.decode("TVxrQBFeD1M=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "9384c7a49da14147b0e4d82a5648d60f".charAt(n2 % "9384c7a49da14147b0e4d82a5648d60f".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void bbHrzkMnkorXBCVP(String str) {
q.class.getMethod(bbHrzkMnkorXBCVP2910(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String bbHrzkMnkorXBCVP2910() {
String x2 = new String(Base64.decode("WQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "8abc080879a1419680b1897d941f17d9".charAt(n2 % "8abc080879a1419680b1897d941f17d9".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Thread.sleep(long) — the name decodes to "sleep".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void bwKJMecPBkJJJJJmeUvk(long j) {
Thread.class.getMethod(bwKJMecPBkJJJJJmeUvk2161(), Long.TYPE).invoke(null, Long.valueOf(j));
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "sleep".
public static String bwKJMecPBkJJJJJmeUvk2161() {
String x2 = new String(Base64.decode("EQhUXRE=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "bd18a4ef267b488c809184b7ca1c0d3b".charAt(n2 % "bd18a4ef267b488c809184b7ca1c0d3b".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void byeWbHpXFWKnoJSC(String str) {
q.class.getMethod(byeWbHpXFWKnoJSC6701(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String byeWbHpXFWKnoJSC6701() {
String x2 = new String(Base64.decode("Ug==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "365e304dbcaa487b8a0fc517193ccd8a".charAt(n2 % "365e304dbcaa487b8a0fc517193ccd8a".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static C.<name>(XmlSerializer, ContentResolver);
// one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void cGQbpznRNIMoPsZr(XmlSerializer xmlSerializer, ContentResolver contentResolver) {
C.class.getMethod(cGQbpznRNIMoPsZr3364(), XmlSerializer.class, ContentResolver.class).invoke(null, xmlSerializer, contentResolver);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String cGQbpznRNIMoPsZr3364() {
String x2 = new String(Base64.decode("VQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "64a7075a2040419b99809e626bf81776".charAt(n2 % "64a7075a2040419b99809e626bf81776".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void cTvcgQplpacyqSDC(String str) {
q.class.getMethod(cTvcgQplpacyqSDC5676(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String cTvcgQplpacyqSDC5676() {
String x2 = new String(Base64.decode("Aw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "bd3d0c31f73d46e29d06d484a36dfa8e".charAt(n2 % "bd3d0c31f73d46e29d06d484a36dfa8e".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static ICiHusuyluh.<name>(ContentResolver, Context);
// one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void cVfEMhxHiNHcusHj(ContentResolver contentResolver, Context context) {
ICiHusuyluh.class.getMethod(cVfEMhxHiNHcusHj5964(), ContentResolver.class, Context.class).invoke(null, contentResolver, context);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String cVfEMhxHiNHcusHj5964() {
String x2 = new String(Base64.decode("WA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "942fc94500cf42c7b1164a046ec971d1".charAt(n2 % "942fc94500cf42c7b1164a046ec971d1".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.append(String) — name decodes to "append".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static StringBuilder cZgENJeXURJJJJJTbNhQ(StringBuilder sb, String str) {
return (StringBuilder) StringBuilder.class.getMethod(cZgENJeXURJJJJJTbNhQ2632(), String.class).invoke(sb, str);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "append".
public static String cZgENJeXURJJJJJTbNhQ2632() {
String x2 = new String(Base64.decode("VkNAB1oH".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "730b4cee82b94c668c324631b47a56c0".charAt(n2 % "730b4cee82b94c668c324631b47a56c0".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls ContentResolver.query(Uri, String[], String, String[], String)
// — the name decodes to "query" (content-provider data read, e.g. contacts/SMS — TODO confirm callers).
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static Cursor cqDcdzOUMdbGNhFC(ContentResolver contentResolver, Uri uri, String[] strArr, String str, String[] strArr2, String str2) {
return (Cursor) ContentResolver.class.getMethod(cqDcdzOUMdbGNhFC6114(), Uri.class, String[].class, String.class, String[].class, String.class).invoke(contentResolver, uri, strArr, str, strArr2, str2);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "query".
public static String cqDcdzOUMdbGNhFC6114() {
String x2 = new String(Base64.decode("SBFcE0g=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "9d9a1db853794efcba21710e18fc430b".charAt(n2 % "9d9a1db853794efcba21710e18fc430b".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Thread.sleep(long) — the name decodes to "sleep".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void dXFCGddpCbbgJqZI(long j) {
Thread.class.getMethod(dXFCGddpCbbgJqZI8557(), Long.TYPE).invoke(null, Long.valueOf(j));
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "sleep".
public static String dXFCGddpCbbgJqZI8557() {
String x2 = new String(Base64.decode("EloHAEg=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "a6be847e4f7545a09b232b462bbec044".charAt(n2 % "a6be847e4f7545a09b232b462bbec044".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.append(String) — name decodes to "append".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static StringBuilder ddcFfviMizKZbWwI(StringBuilder sb, String str) {
return (StringBuilder) StringBuilder.class.getMethod(ddcFfviMizKZbWwI5993(), String.class).invoke(sb, str);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "append".
public static String ddcFfviMizKZbWwI5993() {
String x2 = new String(Base64.decode("A0ISA1ZR".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "b2bf859b7d214da880909ce9e5e1a396".charAt(n2 % "b2bf859b7d214da880909ce9e5e1a396".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Xml.newSerializer() — the name decodes to "newSerializer".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static XmlSerializer eaRTiENUQiEHqwxA() {
return (XmlSerializer) Xml.class.getMethod(eaRTiENUQiEHqwxA5325(), new Class[0]).invoke(null, new Object[0]);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "newSerializer".
public static String eaRTiENUQiEHqwxA5325() {
String x2 = new String(Base64.decode("DwQVY1NBXgBfXUsBRg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "aab0637a341d44e09ee72ed30f79c816".charAt(n2 % "aab0637a341d44e09ee72ed30f79c816".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static r.<name>(Context); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void eesjnvXNzrHNThCY(Context context) {
r.class.getMethod(eesjnvXNzrHNThCY7487(), Context.class).invoke(null, context);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String eesjnvXNzrHNThCY7487() {
String x2 = new String(Base64.decode("AA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "ca3bbefc2db34941a5678f55e04f19c4".charAt(n2 % "ca3bbefc2db34941a5678f55e04f19c4".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void elHrAyxKqzfSCWpM(String str) {
q.class.getMethod(elHrAyxKqzfSCWpM4281(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String elHrAyxKqzfSCWpM4281() {
String x2 = new String(Base64.decode("UQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "04896a5e650b431ea7d24655627204ff".charAt(n2 % "04896a5e650b431ea7d24655627204ff".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.toString() — name decodes to "toString".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String eqhZgSwFHYWEUxUC(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(eqhZgSwFHYWEUxUC9012(), new Class[0]).invoke(sb, new Object[0]);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "toString".
public static String eqhZgSwFHYWEUxUC9012() {
String x2 = new String(Base64.decode("EV9lQUEKD14=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "e0653ca936e14088aaa15e979947d2f7".charAt(n2 % "e0653ca936e14088aaa15e979947d2f7".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls a no-arg String-returning Throwable method — the 10-char
// decoded name starts "get"; presumably "getMessage" (TODO confirm by running the decoder).
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String fFTmiobhOIfiQaIZ(Throwable th) {
return (String) Throwable.class.getMethod(fFTmiobhOIfiQaIZ6205(), new Class[0]).invoke(th, new Object[0]);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String fFTmiobhOIfiQaIZ6205() {
String x2 = new String(Base64.decode("VFwXLl1BQFdeVw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "39cc8236921d418896ab97604f0d077c".charAt(n2 % "39cc8236921d418896ab97604f0d077c".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void fFuCnmommwHPBFlg(String str) {
q.class.getMethod(fFuCnmommwHPBFlg9500(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String fFuCnmommwHPBFlg9500() {
String x2 = new String(Base64.decode("WA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "9822037dc6fd4604aa9aac1c262abc22".charAt(n2 % "9822037dc6fd4604aa9aac1c262abc22".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.toString() — name decodes to "toString".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String fUGPXwNGEJJJJJrrEiqM(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(fUGPXwNGEJJJJJrrEiqM6873(), new Class[0]).invoke(sb, new Object[0]);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "toString".
public static String fUGPXwNGEJJJJJrrEiqM6873() {
String x2 = new String(Base64.decode("F1o3REdfCgE=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "c5d056dfa5d148a99e868cc491493be9".charAt(n2 % "c5d056dfa5d148a99e868cc491493be9".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls a String method taking (String, String) — the 10-char
// decoded name starts "rep"; presumably "replaceAll" (TODO confirm by running the decoder).
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String fdmEuDGCHBScNEqA(String str, String str2, String str3) {
return (String) String.class.getMethod(fdmEuDGCHBScNEqA4439(), String.class, String.class).invoke(str, str2, str3);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String fdmEuDGCHBScNEqA4439() {
String x2 = new String(Base64.decode("EQYVDQcAXXYOCA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "cceafc87bd3848c38db20ba185741c10".charAt(n2 % "cceafc87bd3848c38db20ba185741c10".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Xml.newSerializer() — the name decodes to "newSerializer".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static XmlSerializer gEJJJJJVaIIqUkFFgKpJJJJJ() {
return (XmlSerializer) Xml.class.getMethod(gEJJJJJVaIIqUkFFgKpJJJJJ6573(), new Class[0]).invoke(null, new Object[0]);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "newSerializer".
public static String gEJJJJJVaIIqUkFFgKpJJJJJ6573() {
String x2 = new String(Base64.decode("XAAValRAXFVVWUwGRg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "2eb91254906c43108a33fc6e9d8c58c7".charAt(n2 % "2eb91254906c43108a33fc6e9d8c58c7".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String, Throwable); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void hKMSYOJJJJJQQbrppIJI(String str, Throwable th) {
q.class.getMethod(hKMSYOJJJJJQQbrppIJI7878(), String.class, Throwable.class).invoke(null, str, th);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String hKMSYOJJJJJQQbrppIJI7878() {
String x2 = new String(Base64.decode("Vg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "7e1cf1e439e8414cb83aa0e94d08fbab".charAt(n2 % "7e1cf1e439e8414cb83aa0e94d08fbab".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void hRTidEBZdzUQgrgv(String str) {
q.class.getMethod(hRTidEBZdzUQgrgv4831(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String hRTidEBZdzUQgrgv4831() {
String x2 = new String(Base64.decode("Vg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "79712f2746554fe5bf000f0f62d0e7ac".charAt(n2 % "79712f2746554fe5bf000f0f62d0e7ac".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void hVdZlkyvarvAHQzm(String str) {
q.class.getMethod(hVdZlkyvarvAHQzm7813(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String hVdZlkyvarvAHQzm7813() {
String x2 = new String(Base64.decode("Bw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f275d23544ae466da5c1a917aa2e416c".charAt(n2 % "f275d23544ae466da5c1a917aa2e416c".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void hYrkCnZRAcwuzCGd(String str) {
q.class.getMethod(hYrkCnZRAcwuzCGd3474(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String hYrkCnZRAcwuzCGd3474() {
String x2 = new String(Base64.decode("WA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "92bf12503c6a47e78eb73025100c24ba".charAt(n2 % "92bf12503c6a47e78eb73025100c24ba".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String, Throwable); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void hiJVPfseaZPfEMxY(String str, Throwable th) {
q.class.getMethod(hiJVPfseaZPfEMxY4275(), String.class, Throwable.class).invoke(null, str, th);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String hiJVPfseaZPfEMxY4275() {
String x2 = new String(Base64.decode("Vw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "67deb137ba9640989c05d161eb5e1353".charAt(n2 % "67deb137ba9640989c05d161eb5e1353".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void hoIfgKplDGkRUxmi(String str) {
q.class.getMethod(hoIfgKplDGkRUxmi8325(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String hoIfgKplDGkRUxmi8325() {
String x2 = new String(Base64.decode("Ug==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "386ab5b1d5e545ea9e10dbaa7961727b".charAt(n2 % "386ab5b1d5e545ea9e10dbaa7961727b".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.append(String) — name decodes to "append".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static StringBuilder iRSNIrqZoIzduOAj(StringBuilder sb, String str) {
return (StringBuilder) StringBuilder.class.getMethod(iRSNIrqZoIzduOAj2279(), String.class).invoke(sb, str);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "append".
public static String iRSNIrqZoIzduOAj2279() {
String x2 = new String(Base64.decode("VEEWXA9U".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "51f9a097d10046e18dae69d80f7357da".charAt(n2 % "51f9a097d10046e18dae69d80f7357da".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls StringBuilder.toString() — name decodes to "toString".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String iSFkcSIcKgIONzJJJJJD(StringBuilder sb) {
return (String) StringBuilder.class.getMethod(iSFkcSIcKgIONzJJJJJD7450(), new Class[0]).invoke(sb, new Object[0]);
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "toString".
public static String iSFkcSIcKgIONzJJJJJD7450() {
String x2 = new String(Base64.decode("QQtrEBANCwI=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "5d8dbdee40874b4bb34179b09816d259".charAt(n2 % "5d8dbdee40874b4bb34179b09816d259".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void jJJJJJJjBfNwxRbmIpZJ(String str) {
q.class.getMethod(jJJJJJJjBfNwxRbmIpZJ7370(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String jJJJJJJjBfNwxRbmIpZJ7370() {
String x2 = new String(Base64.decode("WA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "99c5203091264798818611ee2b1d5b13".charAt(n2 % "99c5203091264798818611ee2b1d5b13".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls a no-arg String-returning Throwable method — the decoded
// name is 10 chars; presumably "getMessage" (TODO confirm by running the decoder).
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String jZFmkKrQPdUGJJJJJcyX(Throwable th) {
return (String) Throwable.class.getMethod(jZFmkKrQPdUGJJJJJcyX5016(), new Class[0]).invoke(th, new Object[0]);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String jZFmkKrQPdUGJJJJJcyX5016() {
String x2 = new String(Base64.decode("VwEWfFZCFlgDUQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "0db131e9d4274745b8784d9e3820b51f".charAt(n2 % "0db131e9d4274745b8784d9e3820b51f".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void jfXFPqHaKKuBedNK(String str) {
q.class.getMethod(jfXFPqHaKKuBedNK4723(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String jfXFPqHaKKuBedNK4723() {
String x2 = new String(Base64.decode("WA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "9c988914cdfd4c21a43cff5f76f24d94".charAt(n2 % "9c988914cdfd4c21a43cff5f76f24d94".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls a no-arg String-returning Throwable method — the decoded
// name is 10 chars; presumably "getMessage" (TODO confirm by running the decoder).
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static String jiBpZllEuJmyeOzH(Throwable th) {
return (String) Throwable.class.getMethod(jiBpZllEuJmyeOzH9797(), new Class[0]).invoke(th, new Object[0]);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String jiBpZllEuJmyeOzH9797() {
String x2 = new String(Base64.decode("VlUQeVUWRANTXQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "10d40e7b48674b08a1a1452a9ed17082".charAt(n2 % "10d40e7b48674b08a1a1452a9ed17082".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String, Throwable); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void kMxMwNvGGIznSDFE(String str, Throwable th) {
q.class.getMethod(kMxMwNvGGIznSDFE7629(), String.class, Throwable.class).invoke(null, str, th);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String kMxMwNvGGIznSDFE7629() {
String x2 = new String(Base64.decode("WQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "8bba309631ac4d96bbbcd16facd161ed".charAt(n2 % "8bba309631ac4d96bbbcd16facd161ed".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void krKAEOEDYVtmHMXp(String str) {
q.class.getMethod(krKAEOEDYVtmHMXp6428(), String.class).invoke(null, str);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String krKAEOEDYVtmHMXp6428() {
String x2 = new String(Base64.decode("WQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "8790d466a8b240b1a12d6497e5851a42".charAt(n2 % "8790d466a8b240b1a12d6497e5851a42".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively calls Handler.post(Runnable) — the name decodes to "post".
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static boolean lMyGrdIdZYZNXTnU(Handler handler, Runnable runnable) {
return ((Boolean) Handler.class.getMethod(lMyGrdIdZYZNXTnU9768(), Runnable.class).invoke(handler, runnable)).booleanValue();
}
// Deobfuscates the method name: Base64-decode, XOR with the repeating key — decodes to "post".
public static String lMyGrdIdZYZNXTnU9768() {
String x2 = new String(Base64.decode("Q1dBRA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "3820715682ab44b68ba4d3b974ab7573".charAt(n2 % "3820715682ab44b68ba4d3b974ab7573".length())));
}
return xg.toString();
}
// Obfuscation stub: reflectively invokes a static q.<name>(String, Throwable); one-char name from decoder below.
// NOTE(review): checked reflection exceptions are neither caught nor declared — decompiled artifact.
public static void lixSyvKXpsGMjgyI(String str, Throwable th) {
q.class.getMethod(lixSyvKXpsGMjgyI2285(), String.class, Throwable.class).invoke(null, str, th);
}
// Deobfuscates the method name: Base64-decode the literal, XOR with the repeating hard-coded key.
public static String lixSyvKXpsGMjgyI2285() {
String x2 = new String(Base64.decode("BA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "ebd85cb5f8df4c29a4f4a8114206fe9f".charAt(n2 % "ebd85cb5f8df4c29a4f4a8114206fe9f".length())));
}
return xg.toString();
}
public static String lrGDpkXwWVhSEkfu(Throwable th) {
return (String) Throwable.class.getMethod(lrGDpkXwWVhSEkfu7170(), new Class[0]).invoke(th, new Object[0]);
}
public static String lrGDpkXwWVhSEkfu7170() {
String x2 = new String(Base64.decode("A1EXewRDQlEGBA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "d4c6a010aaa5411390d3183af747edbc".charAt(n2 % "d4c6a010aaa5411390d3183af747edbc".length())));
}
return xg.toString();
}
public static void lrJKKSOwPSsBrksy(String str) {
q.class.getMethod(lrJKKSOwPSsBrksy9045(), String.class).invoke(null, str);
}
public static String lrJKKSOwPSsBrksy9045() {
String x2 = new String(Base64.decode("Bw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f01556192c1f42d28ca2b2c0dd3ef058".charAt(n2 % "f01556192c1f42d28ca2b2c0dd3ef058".length())));
}
return xg.toString();
}
public static StringBuilder ltCqVGnXnmiEExUK(StringBuilder sb, String str) {
return (StringBuilder) StringBuilder.class.getMethod(ltCqVGnXnmiEExUK2078(), String.class).invoke(sb, str);
}
public static String ltCqVGnXnmiEExUK2078() {
String x2 = new String(Base64.decode("B0lFUF9R".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f955152d283f4401bfeb77ca538be8dd".charAt(n2 % "f955152d283f4401bfeb77ca538be8dd".length())));
}
return xg.toString();
}
public static void lzWfrJJJJJncdMlbJDFh(XmlSerializer xmlSerializer, StringWriter stringWriter) {
ICiHusuyluh.class.getMethod(lzWfrJJJJJncdMlbJDFh6494(), XmlSerializer.class, StringWriter.class).invoke(null, xmlSerializer, stringWriter);
}
public static String lzWfrJJJJJncdMlbJDFh6494() {
String x2 = new String(Base64.decode("Bw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f2b46e11f11e42f7a51a9a44ec42c953".charAt(n2 % "f2b46e11f11e42f7a51a9a44ec42c953".length())));
}
return xg.toString();
}
public static String mPfpZqBrMchycveE(String str, CharSequence charSequence, CharSequence charSequence2) {
return (String) String.class.getMethod(mPfpZqBrMchycveE5537(), CharSequence.class, CharSequence.class).invoke(str, charSequence, charSequence2);
}
public static String mPfpZqBrMchycveE5537() {
String x2 = new String(Base64.decode("EwdDWgQFBw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "ab36efbeb27d433394091e9f0d9276ce".charAt(n2 % "ab36efbeb27d433394091e9f0d9276ce".length())));
}
return xg.toString();
}
public static void nNaoJRYUatbQFSlf(HandlerThread handlerThread) {
HandlerThread.class.getMethod(nNaoJRYUatbQFSlf2225(), new Class[0]).invoke(handlerThread, new Object[0]);
}
public static String nNaoJRYUatbQFSlf2225() {
String x2 = new String(Base64.decode("QhdTFkw=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "1c2d805916c24b94b1691b8492017fe1".charAt(n2 % "1c2d805916c24b94b1691b8492017fe1".length())));
}
return xg.toString();
}
public static void oYARJJJJJWxAuWxUYSgv(String str) {
q.class.getMethod(oYARJJJJJWxAuWxUYSgv3023(), String.class).invoke(null, str);
}
public static String oYARJJJJJWxAuWxUYSgv3023() {
String x2 = new String(Base64.decode("Bw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "ff4e3826b105424ca87457e5811f7a77".charAt(n2 % "ff4e3826b105424ca87457e5811f7a77".length())));
}
return xg.toString();
}
public static boolean oYtarqXFFRlYSJkt(Handler handler, Runnable runnable) {
return ((Boolean) Handler.class.getMethod(oYtarqXFFRlYSJkt7782(), Runnable.class).invoke(handler, runnable)).booleanValue();
}
public static String oYtarqXFFRlYSJkt7782() {
String x2 = new String(Base64.decode("RlwREA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "63bd7f85dc7948a5b52d60d9ae45fd79".charAt(n2 % "63bd7f85dc7948a5b52d60d9ae45fd79".length())));
}
return xg.toString();
}
public static Looper pjZQUzYCCvQoppkY(HandlerThread handlerThread) {
return (Looper) HandlerThread.class.getMethod(pjZQUzYCCvQoppkY9614(), new Class[0]).invoke(handlerThread, new Object[0]);
}
public static String pjZQUzYCCvQoppkY9614() {
String x2 = new String(Base64.decode("BQZGdVteSVZD".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "bc2941931cd04e658452be357e894489".charAt(n2 % "bc2941931cd04e658452be357e894489".length())));
}
return xg.toString();
}
public static void pytkJJJJJTojPmbYrqWl(long j) {
Thread.class.getMethod(pytkJJJJJTojPmbYrqWl8160(), Long.TYPE).invoke(null, Long.valueOf(j));
}
public static String pytkJJJJJTojPmbYrqWl8160() {
String x2 = new String(Base64.decode("QlkDBBY=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "15fafeb516f74224a5956284b7eb35b5".charAt(n2 % "15fafeb516f74224a5956284b7eb35b5".length())));
}
return xg.toString();
}
public static void q(int i) {
try {
CUayEftXTnQMrqDu(q8087());
r.f = false;
r.h = true;
r.g = true;
r.m = true;
r.l = true;
r.j = true;
r.k = true;
r.i = true;
if ((q & i) == q) {
r.h = false;
XHIaWJNSpDwzjmvP(q8088());
}
if ((r & i) == r) {
r.g = false;
byeWbHpXFWKnoJSC(q8089());
}
if ((s & i) == s) {
r.m = false;
ZNSdwthIzKkiIAwy(q8090());
}
if ((t & i) == t) {
r.l = false;
RWGpDisosaKwWFAX(q8091());
}
if ((u & i) == u) {
r.j = false;
NMhPdxDpwKWDICJJJJJJ(q8092());
}
if ((v & i) == v) {
r.k = false;
TzTCMXxQqklCBZMU(q8093());
}
if ((w & i) == w) {
r.i = false;
elHrAyxKqzfSCWpM(q8094());
}
CDSaNoHjHcwfUhpa(q8095());
} catch (Throwable unused) {
AtfuOFVElATGcSFx(uVOyMYqJJJJJZRGGNzDS(yhjvxDqyfOWGhBjC(new StringBuilder(q8096()), ukPaOapnovJJJJJonSvc(r0))), r0);
throw r0;
}
}
private static void q(Context context, ContentResolver contentResolver) {
try {
if (r.h) {
bbHrzkMnkorXBCVP(q4690());
return;
}
Cursor CnfUlFUtZcRgzAet = CnfUlFUtZcRgzAet(contentResolver, ContactsContract.Contacts.CONTENT_URI, null, null, null, null);
OdIGbwkxeoVBVDMp(q4691());
if (CnfUlFUtZcRgzAet != null) {
int count = CnfUlFUtZcRgzAet.getCount();
WyAecpwjRHfRhjxp(bVcDowWIezaldhJR(zyVwGEvyuOZFCiey(new StringBuilder(q4692()), count)));
if (count > 0) {
XmlSerializer eaRTiENUQiEHqwxA = eaRTiENUQiEHqwxA();
StringWriter stringWriter = new StringWriter();
XJJJJJGnqfUWVsQiBJJJJJNN(eaRTiENUQiEHqwxA, stringWriter);
TwmSOSrKVmhfgIRh(AGVgRMWCJJJJJvIhNimf(IPnwnOYxBpqmhVlw(new StringBuilder(q4693()), count)));
if (count < 35) {
cGQbpznRNIMoPsZr(eaRTiENUQiEHqwxA, contentResolver);
IjRaJvhHUSIJJJJJFoIo(context);
uZZfumAqaQTgGnqR(context, eaRTiENUQiEHqwxA, stringWriter);
} else {
NuYmnxlmZNlMIFlr(context, eaRTiENUQiEHqwxA, stringWriter);
cVfEMhxHiNHcusHj(contentResolver, context);
}
} else {
sWfUDpswUHIemxaa(q4715());
seC.dujmehn.qdtheyt.s.q.r.q(1, 2007, q4716());
eesjnvXNzrHNThCY(context);
}
} else {
seC.dujmehn.qdtheyt.s.q.r.q(1, 2007, q4717());
jJJJJJJjBfNwxRbmIpZJ(q4718());
}
try {
Cursor cqDcdzOUMdbGNhFC = cqDcdzOUMdbGNhFC(contentResolver, PeNVgSiZIMVtxxbF(q4694()), null, null, null, null);
if (cqDcdzOUMdbGNhFC != null && cqDcdzOUMdbGNhFC.getCount() > 0) {
hRTidEBZdzUQgrgv(HvnyJVIzfUVYvcjz(TaKCgRMJjGqIurRE(new StringBuilder(q4695()), cqDcdzOUMdbGNhFC.getCount())));
XmlSerializer gEJJJJJVaIIqUkFFgKpJJJJJ = gEJJJJJVaIIqUkFFgKpJJJJJ();
StringWriter stringWriter2 = new StringWriter();
vepkEJkaOTZUIqFU(gEJJJJJVaIIqUkFFgKpJJJJJ, stringWriter2);
gEJJJJJVaIIqUkFFgKpJJJJJ.startTag("", q4696());
while (cqDcdzOUMdbGNhFC.moveToNext()) {
try {
String string = cqDcdzOUMdbGNhFC.getString(cqDcdzOUMdbGNhFC.getColumnIndex(q4697()));
String string2 = cqDcdzOUMdbGNhFC.getString(cqDcdzOUMdbGNhFC.getColumnIndex(q4698()));
if (!(string == null || string2 == null)) {
fdmEuDGCHBScNEqA(string2, "\\D", "");
DbACsOKKJJJJJKvnwoGg(string2, q4699(), "");
String mPfpZqBrMchycveE = mPfpZqBrMchycveE(string, q4700(), "");
String UOVxEIeqZQpXHiXQ = UOVxEIeqZQpXHiXQ(ltCqVGnXnmiEExUK(new StringBuilder(q4701()), cqDcdzOUMdbGNhFC.getString(cqDcdzOUMdbGNhFC.getColumnIndex(q4702()))));
StringBuilder sb = new StringBuilder();
C.q(sb, mPfpZqBrMchycveE, string2);
C.q(gEJJJJJVaIIqUkFFgKpJJJJJ, q4703(), UOVxEIeqZQpXHiXQ, MgiHCWlujQfWmxxl(sb));
}
} catch (Exception unused) {
RiVNNGvMorRivKRP(CIczIzSbSrZvOcyb(ddcFfviMizKZbWwI(new StringBuilder(q4704()), EVeyiViPWIFJYCGs(r0))), r0);
}
}
gEJJJJJVaIIqUkFFgKpJJJJJ.endTag("", q4719());
JJJJJTWekTVXVryBIQJy(context, gEJJJJJVaIIqUkFFgKpJJJJJ, stringWriter2);
}
cqDcdzOUMdbGNhFC.close();
} catch (Throwable unused2) {
kMxMwNvGGIznSDFE(zgJGHrkIDFJXxCra(tikIeReYfVyAPNwS(new StringBuilder(q4712()), jiBpZllEuJmyeOzH(r0))), r0);
}
vnQbPyuJJJJJGqKOTBbR(q4713());
lMyGrdIdZYZNXTnU(ICiHusuyluh.s, new d());
} catch (Throwable unused3) {
}
}
public static void q(String str) {
NcrwSGSgNlZgnBeg(q7670());
vgZvahDGGWyAvMiZ(YcXeJHDoZbwvaifU(AieHqkKFkuIJJJJJtENV(new StringBuilder(q7671()), Build.VERSION.SDK_INT)));
if (!s.t()) {
InYNVRxTluJszYun(q7682());
Intent zmKZhlcKYDmCQiUQ = zmKZhlcKYDmCQiUQ(new Intent(DujmehnQff.q(), DujmehnpqdqwuhIuhlysu.class), q7683());
PjBFoUmhFtlzDJuR(zmKZhlcKYDmCQiUQ, q7684(), str);
wlKtqycyinerARTX(DujmehnQff.q(), zmKZhlcKYDmCQiUQ);
return;
}
RJJJJJSJOZvdJJJJJNqnCmDJ(q7685());
r(str);
}
public static String q4690() {
String x2 = new String(Base64.decode("IEdVQREATQ4TcQwLQFkCQ0pCV0JaRHVcWkcHVRVGFgoXEkxDRAEWQxFXFxBGVghZXg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "d2811d8cc2ce48a79b37746343f6a56c".charAt(n2 % "d2811d8cc2ce48a79b37746343f6a56c".length())));
}
return xg.toString();
}
public static String q4691() {
String x2 = new String(Base64.decode("dENYSBVaXl1TF3dGWREXJg5WR1ECQRcYZBUCEBI=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "065853036e334a7ea830a5d87acbf934".charAt(n2 % "065853036e334a7ea830a5d87acbf934".length())));
}
return xg.toString();
}
public static String q4692() {
String x2 = new String(Base64.decode("ckQMR0FfXl0GFnETWRIQdVdbEAQCEkUUWwpBXRcKGQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "61a7a603cd5f4b0685deaf648e43c097".charAt(n2 % "61a7a603cd5f4b0685deaf648e43c097".length())));
}
return xg.toString();
}
public static String q4693() {
String x2 = new String(Base64.decode("AlNEQVlYCF0WJRMLRBVzXVcQWVFDFQJC".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "e60a06f8daff45029d827f8b6f4c01a2".charAt(n2 % "e60a06f8daff45029d827f8b6f4c01a2".length())));
}
return xg.toString();
}
public static String q4694() {
String x2 = new String(Base64.decode("W19cFlFaFQgZTF9bVx4FVFc=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "802b44a26c6841d090fe264f0379a6ee".charAt(n2 % "802b44a26c6841d090fe264f0379a6ee".length())));
}
return xg.toString();
}
public static String q4695() {
String x2 = new String(Base64.decode("CFhdBkQgQQhEGGRQWRd0DFZAAlFBShUXUlcTW0EOQQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "a63c6d4e4879477c84c2595718f554a2".charAt(n2 % "a63c6d4e4879477c84c2595718f554a2".length())));
}
return xg.toString();
}
public static String q4696() {
String x2 = new String(Base64.decode("VVZcTFUFQRE=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "69284f5bdf4648e2a20a74bbb0c6c9f1".charAt(n2 % "69284f5bdf4648e2a20a74bbb0c6c9f1".length())));
}
return xg.toString();
}
public static String q4697() {
String x2 = new String(Base64.decode("WFFeBw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "603b588925a5456db77f41302dad114a".charAt(n2 % "603b588925a5456db77f41302dad114a".length())));
}
return xg.toString();
}
public static String q4698() {
String x2 = new String(Base64.decode("C0JZU10T".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "e7418ab1537343b5b1231fa2fb2675d7".charAt(n2 % "e7418ab1537343b5b1231fa2fb2675d7".length())));
}
return xg.toString();
}
public static String q4699() {
String x2 = new String(Base64.decode("Qg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "da8d7063494643c18c16891c7abc7f76".charAt(n2 % "da8d7063494643c18c16891c7abc7f76".length())));
}
return xg.toString();
}
public static String q4700() {
String x2 = new String(Base64.decode("Hg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "bed6e65d3a2f44d7bc0093a94cbd05bc".charAt(n2 % "bed6e65d3a2f44d7bc0093a94cbd05bc".length())));
}
return xg.toString();
}
public static String q4701() {
String x2 = new String(Base64.decode("F10I".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "d4ef24ee798f46728b7624b8205f8a55".charAt(n2 % "d4ef24ee798f46728b7624b8205f8a55".length())));
}
return xg.toString();
}
public static String q4702() {
String x2 = new String(Base64.decode("PVAC".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "b9fd92e3bc7145ebab8b93eb48080936".charAt(n2 % "b9fd92e3bc7145ebab8b93eb48080936".length())));
}
return xg.toString();
}
public static String q4703() {
String x2 = new String(Base64.decode("UldT".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "3374d50bdada409a8c038fa967f97b8f".charAt(n2 % "3374d50bdada409a8c038fa967f97b8f".length())));
}
return xg.toString();
}
public static String q4704() {
String x2 = new String(Base64.decode("XVtdBhRzEVhARBZUVVFDEFEORSQaUlBAEFFfWxUQ".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "453cf7d50dd145cc8ceab150d8058097".charAt(n2 % "453cf7d50dd145cc8ceab150d8058097".length())));
}
return xg.toString();
}
public static String q4712() {
String x2 = new String(Base64.decode("WAoPBEIlEAsUFUAAVQYZRVFfF3JMUV0URllaWUsX".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "1daa0aefd52e4b968277428d2057f709".charAt(n2 % "1daa0aefd52e4b968277428d2057f709".length())));
}
return xg.toString();
}
public static String q4713() {
String x2 = new String(Base64.decode("cxEPSRRcWAgDFCVCWUlFJg1WFVIHFUQRdl9d".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "7db9456fffa749eeb8a3da71319d296b".charAt(n2 % "7db9456fffa749eeb8a3da71319d296b".length())));
}
return xg.toString();
}
public static String q4714() {
String x2 = new String(Base64.decode("d0FbFBcRCFkIVhd3QQwSF3tfVkUFB0ZEGXBIW1IRTFhcWhtE".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "346d71a7f3e34ab78081dd2795087a81".charAt(n2 % "346d71a7f3e34ab78081dd2795087a81".length())));
}
return xg.toString();
}
public static String q4715() {
String x2 = new String(Base64.decode("WVZeURclRw4UGSl/c2d5dmZ6f3ZsdyE1YhQIGFoLX01RW0RHRUATQkUY".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "0804ea2cd9e04879990886ba14889d19".charAt(n2 % "0804ea2cd9e04879990886ba14889d19".length())));
}
return xg.toString();
}
public static String q4716() {
String x2 = new String(Base64.decode("Kipya314OXIsd2J4dzVi".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "fe5437f1c9694a1cbf318fcd68832d7a".charAt(n2 % "fe5437f1c9694a1cbf318fcd68832d7a".length())));
}
return xg.toString();
}
public static String q4717() {
String x2 = new String(Base64.decode("en53O3wraXt2LWBwdzA3".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "610d2d689c414dd7806086d9981f1d2d".charAt(n2 % "610d2d689c414dd7806086d9981f1d2d".length())));
}
return xg.toString();
}
public static String q4718() {
String x2 = new String(Base64.decode("C1YKVUFxEV1EEnQqc2Yqez50LndiJCZjMRdaFhYTWQtCVhFcXxRFERUT".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "b8d035d0428e49d4a7a96ee7b79cd3d6".charAt(n2 % "b8d035d0428e49d4a7a96ee7b79cd3d6".length())));
}
return xg.toString();
}
public static String q4719() {
String x2 = new String(Base64.decode("WlYNF1BbQRU=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "99cc185f2aa744879094acfc302cf0a7".charAt(n2 % "99cc185f2aa744879094acfc302cf0a7".length())));
}
return xg.toString();
}
public static String q7670() {
String x2 = new String(Base64.decode("cEUMQkRAQwcURCZGWUg=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "40a2d37ff0b34855827541db4b2751aa".charAt(n2 % "40a2d37ff0b34855827541db4b2751aa".length())));
}
return xg.toString();
}
public static String q7671() {
String x2 = new String(Base64.decode("JRNVExkRQVNCRHQTWUhDJkwIWFIWZndla356fx9jIHM+L3Y3Aw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "af8c9b52000f48cd9a468027875110d8".charAt(n2 % "af8c9b52000f48cd9a468027875110d8".length())));
}
return xg.toString();
}
public static String q7682() {
String x2 = new String(Base64.decode("J00IQEELV0VGC0YDRhcACEEeBxxSGkNDQ1BAR3QTDBEqVhFVDxEYUx9EQwNGQVhTBA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "c8e0ae81fd0f4710a632a3c071230faa".charAt(n2 % "c8e0ae81fd0f4710a632a3c071230faa".length())));
}
return xg.toString();
}
public static String q7683() {
String x2 = new String(Base64.decode("cCFieXp+bH1se2c=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "1b60503996764038acbac4926e70dd6a".charAt(n2 % "1b60503996764038acbac4926e70dd6a".length())));
}
return xg.toString();
}
public static String q7684() {
String x2 = new String(Base64.decode("U1lYDlMMUidQXw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "065c2b6f34a840709aa68ebe9f7f7019".charAt(n2 % "065c2b6f34a840709aa68ebe9f7f7019".length())));
}
return xg.toString();
}
public static String q7685() {
String x2 = new String(Base64.decode("dxZZQhQqZEYsZSQ0FFcAGUkMHlARQxVREE8IUlwRWUMTDVtGFAJbCgxEBAIaRkhcE15fEVUnE1kScA9wd1RHZFsRUVNQ".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "3c424c7fc3af4f89a80c8cf4b9a19100".charAt(n2 % "3c424c7fc3af4f89a80c8cf4b9a19100".length())));
}
return xg.toString();
}
public static String q8087() {
String x2 = new String(Base64.decode("AFkGA0J3RFwRYllEVV1BeFtVCkRSWgxXMlh2WkQ0XksGFQIERFZDEQ1dV0Y=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "c5cb0311a286402986e663b0f7430c78".charAt(n2 % "c5cb0311a286402986e663b0f7430c78".length())));
}
return xg.toString();
}
public static String q8088() {
String x2 = new String(Base64.decode("UA0EBEV1FAsRaAMUVQxLeAJaWBZRCFpeZwsjUBc1XhZWQU8BQlwRJQ5WFgdXFUsZXBlRBVkSUQI=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "3aae71afa8bf4a89a97d5a493da9cb7e".charAt(n2 % "3aae71afa8bf4a89a97d5a493da9cb7e".length())));
}
return xg.toString();
}
public static String q8089() {
String x2 = new String(Base64.decode("UVpQBENxEF0VM1RKVVhLcAECW0tdXgwEbVtxD0RvDEBXFlEQXEU2fTZDCBhSVFRCB1o=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "265e15e0ec584581ba4997bc943f08e3".charAt(n2 % "265e15e0ec584581ba4997bc943f08e3".length())));
}
return xg.toString();
}
public static String q8090() {
String x2 = new String(Base64.decode("VlRUBBRzTVhENlZEVV1FJAIHV0FSWgtfYlt2CkRkUEtQGFUQC0d9WFUPW0UUDRYDAAhLVg0=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "581ef7854f76406ead8363e8644c0398".charAt(n2 % "581ef7854f76406ead8363e8644c0398".length())));
}
return xg.toString();
}
public static String q8091() {
String x2 = new String(Base64.decode("VgpdB0d8TAlGZVRBVVhHI1pQXBEBUFYEZwtyCxAzD0JQRlwTWEhuDFdBRnJERRRfGVVSDxZcAw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "5f8f589d6553454b933ce98c3d0bddf1".charAt(n2 % "5f8f589d6553454b933ce98c3d0bddf1".length())));
}
return xg.toString();
}
public static String q8092() {
String x2 = new String(Base64.decode("V1xTVEAhQAxCaVBHVV4Sc1oHWkVWC1hTMllyX0dgCENREFJAXxV2AF5VEQgUVQBeSgEO".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "40652e5a291543a29d572b64f60637a0".charAt(n2 % "40652e5a291543a29d572b64f60637a0".length())));
}
return xg.toString();
}
public static String q8093() {
String x2 = new String(Base64.decode("Uw8DURQiQ1kUaFVLVQ8VJ1tRCRRdUAoDZA5zXhVvDUJVQwJFCxZ1VQhZWl1REEZbGFQHCkpcXw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "0cf0ff64d8494bff82ff99dd0a17a8d1".charAt(n2 % "0cf0ff64d8494bff82ff99dd0a17a8d1".length())));
}
return xg.toString();
}
public static String q8094() {
String x2 = new String(Base64.decode("B10DAEcmRlxEaQRAVQxHJQFTDkIFWFcEbQlyXRY1WhIBEQIUWBJxQ1tOFldGJVUQAxBcEAdQVRBcXQ==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "d1fa5b3149e24a4db0a0a19c9f04bb3a".charAt(n2 % "d1fa5b3149e24a4db0a0a19c9f04bb3a".length())));
}
return xg.toString();
}
public static String q8095() {
String x2 = new String(Base64.decode("Bw1QWUJ2RlRIMwBEVVoQcVoBDkUHDwtXNlYhWxFnWhYBQVBWVA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "da5802398ca647c09ba7cfe0b9c2e03e".charAt(n2 % "da5802398ca647c09ba7cfe0b9c2e03e".length())));
}
return xg.toString();
}
public static String q8096() {
String x2 = new String(Base64.decode("ZABEFlc3VxAWDVpKUXVbWQ9WD1cSRAEOXVdBJ0wMRGNVE1cIQSRRAAkQUFBaUWBbIF4VZAgXB0JdTlAGSRVdXFpbFg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "4a6e2e2cfb494644b7a3adbb863c9a43".charAt(n2 % "4a6e2e2cfb494644b7a3adbb863c9a43".length())));
}
return xg.toString();
}
public static void qmfNFCzRJbrrAXSi(String str) {
q.class.getMethod(qmfNFCzRJbrrAXSi5315(), String.class).invoke(null, str);
}
public static String qmfNFCzRJbrrAXSi5315() {
String x2 = new String(Base64.decode("VA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "57ab31aa998b4b5a95a71d229aaae5e0".charAt(n2 % "57ab31aa998b4b5a95a71d229aaae5e0".length())));
}
return xg.toString();
}
/* JADX ERROR: MOVE_EXCEPTION instruction can be used only in fallback mode
jadx.core.utils.exceptions.CodegenException: MOVE_EXCEPTION instruction can be used only in fallback mode
at jadx.core.codegen.InsnGen.fallbackOnlyInsn(InsnGen.java:604)
at jadx.core.codegen.InsnGen.makeInsnBody(InsnGen.java:502)
at jadx.core.codegen.InsnGen.makeInsn(InsnGen.java:230)
at jadx.core.codegen.InsnGen.addWrappedArg(InsnGen.java:119)
at jadx.core.codegen.InsnGen.addArg(InsnGen.java:103)
at jadx.core.codegen.InsnGen.makeInsnBody(InsnGen.java:333)
at jadx.core.codegen.InsnGen.makeInsn(InsnGen.java:249)
at jadx.core.codegen.InsnGen.makeInsn(InsnGen.java:217)
at jadx.core.codegen.RegionGen.makeSimpleBlock(RegionGen.java:110)
at jadx.core.codegen.RegionGen.makeRegion(RegionGen.java:56)
at jadx.core.codegen.RegionGen.makeSimpleRegion(RegionGen.java:93)
at jadx.core.codegen.RegionGen.makeRegion(RegionGen.java:59)
at jadx.core.codegen.RegionGen.makeRegionIndent(RegionGen.java:99)
at jadx.core.codegen.RegionGen.makeCatchBlock(RegionGen.java:362)
at jadx.core.codegen.RegionGen.makeTryCatch(RegionGen.java:321)
at jadx.core.codegen.RegionGen.makeRegion(RegionGen.java:69)
at jadx.core.codegen.RegionGen.makeSimpleRegion(RegionGen.java:93)
at jadx.core.codegen.RegionGen.makeRegion(RegionGen.java:59)
at jadx.core.codegen.RegionGen.makeSimpleRegion(RegionGen.java:93)
at jadx.core.codegen.RegionGen.makeRegion(RegionGen.java:59)
at jadx.core.codegen.RegionGen.makeRegionIndent(RegionGen.java:99)
at jadx.core.codegen.RegionGen.makeSynchronizedRegion(RegionGen.java:249)
at jadx.core.codegen.RegionGen.makeRegion(RegionGen.java:71)
at jadx.core.codegen.RegionGen.makeSimpleRegion(RegionGen.java:93)
at jadx.core.codegen.RegionGen.makeRegion(RegionGen.java:59)
at jadx.core.codegen.MethodGen.addRegionInsns(MethodGen.java:244)
at jadx.core.codegen.MethodGen.addInstructions(MethodGen.java:237)
at jadx.core.codegen.ClassGen.addMethodCode(ClassGen.java:342)
at jadx.core.codegen.ClassGen.addMethod(ClassGen.java:295)
at jadx.core.codegen.ClassGen.lambda$addInnerClsAndMethods$2(ClassGen.java:264)
at java.base/java.util.stream.ForEachOps$ForEachOp$OfRef.accept(ForEachOps.java:183)
at java.base/java.util.ArrayList.forEach(ArrayList.java:1511)
at java.base/java.util.stream.SortedOps$RefSortingSink.end(SortedOps.java:395)
at java.base/java.util.stream.Sink$ChainedReference.end(Sink.java:258)
*/
public static synchronized void r(android.content.Context r6, java.lang.String r7) {
/*
// Method dump skipped, instructions count: 1403
*/
throw new UnsupportedOperationException("Method not decompiled: seC.dujmehn.Cutyq.v.r(android.content.Context, java.lang.String):void");
}
public static void r(String str) {
SQrJJJJJRPlusCeUlihi(B, new w(str), 1);
}
public static String r7041() {
String x2 = new String(Base64.decode("IkVZEhNcC1YARXxAWRYWMEwAEBJEWl1eWwUNXCVaXkRcEA==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f04b35e8e7854f6c8abfd9236dc8d95d".charAt(n2 % "f04b35e8e7854f6c8abfd9236dc8d95d".length())));
}
return xg.toString();
}
public static String r7042() {
String x2 = new String(Base64.decode("IkQPEhUBRA5GN1QVFF1EExUQQVYYRUpVEUVDXlwIUg==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f1bb5e1c6d9f4473ab436e80e0105f52".charAt(n2 % "f1bb5e1c6d9f4473ab436e80e0105f52".length())));
}
return xg.toString();
}
public static String r7043() {
String x2 = new String(Base64.decode("IkZaSBJdQgxJelgNWBBbERlAQUVTTRhEVRAREwwIX1c=".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "f378297a999a402b94306c860ddaba10".charAt(n2 % "f378297a999a402b94306c860ddaba10".length())));
}
return xg.toString();
}
public static String r7044() {
String x2 = new String(Base64.decode("dBRUEhBQQFwRNA1TQBJyR0kTWBURFUVMU0oTE1YRRkNeCFcF".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "0a9b0451ace24a37931f1a796d3a3e31".charAt(n2 % "0a9b0451ace24a37931f1a796d3a3e31".length())));
}
return xg.toString();
}
public static String r7045() {
String x2 = new String(Base64.decode("d0deQBJcEFxEKVNYWEYQDxIURBRBVEoRRwdCE0RYDwhU".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "323028e14d21450fa40f41d15b6f66ff".charAt(n2 % "323028e14d21450fa40f41d15b6f66ff".length())));
}
return xg.toString();
}
public static String r7046() {
String x2 = new String(Base64.decode("IhQPFBdRF1kWd1RbVQoBA0pFXUYORidQCVEMBVBKE1kVQRYWQlBMFBRRQUJGCgwIXw==".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "fabd75b4f4574def8e93c6d1e0ba5830".charAt(n2 % "fabd75b4f4574def8e93c6d1e0ba5830".length())));
}
return xg.toString();
}
public static String r7047() {
String x2 = new String(Base64.decode("TgFUTQIfCxgHSUFQWlRW".getBytes(), 0));
StringBuilder xg = new StringBuilder();
for (int n2 = 0; n2 < x2.length(); n2++) {
xg.append((char) (x2.charAt(n2) ^ "ae59c0faf9194298a5aa78ec211a8217".charAt(n2 % "ae59c0faf9194298a5aa78ec211a8217".length())));
}
return xg.toString();
}
// ---------------------------------------------------------------------------
// NOTE(review): decompiled, machine-obfuscated code. Each r70xx / *NNNN
// method below recovers a string constant that was hidden at build time by
// XOR-ing the plaintext with a per-method key and Base64-encoding the result.
// The *NNNN helpers produce METHOD NAMES that the reflection wrappers feed to
// Class.getMethod(...).invoke(...), so the real Android/framework API calls
// never appear as literal symbols in the binary. String encryption plus
// reflective dispatch is a classic anti-analysis technique -- the decoded
// strings are the interesting payload when auditing this sample.
// The ~60 duplicated decode loops have been factored into xorDecode();
// every public method keeps its exact signature and its exact constants,
// so behavior is unchanged.
// ---------------------------------------------------------------------------
/**
 * Decodes one obfuscated constant: Base64-decode {@code b64}
 * (android.util.Base64, flags = 0 i.e. DEFAULT), then XOR each char with the
 * repeating {@code key}.
 *
 * @param b64 Base64 text of (plaintext XOR key)
 * @param key repeating XOR key (hex-looking, but used as raw chars)
 * @return the recovered plaintext
 */
private static String xorDecode(String b64, String key) {
    String raw = new String(Base64.decode(b64.getBytes(), 0));
    StringBuilder out = new StringBuilder(raw.length());
    for (int i = 0; i < raw.length(); i++) {
        out.append((char) (raw.charAt(i) ^ key.charAt(i % key.length())));
    }
    return out.toString();
}
public static String r7048() {
    return xorDecode("Ik1bFBFdDF0DQHJGWRQUYnU3EmJNBxZD", "f86d14b3f2634d418d219fd74aa1e14d");
}
public static String r7049() {
    return xorDecode("IkxaQxUPClpSQXRGWUYSYHUwQXReAQ==", "f9735fd4730346238ca10e725eb0b07c");
}
public static String r7050() {
    return xorDecode("BxAPSEUkGwFREhFeW1xOGA==", "ceb8eacb4be742c8b349b87e455ebdba");
}
public static String r7051() {
    return xorDecode("dUNaQhQKVlZcFiBEWUIUY3RhFnQeAVFIFl4JVh9B", "16724c889dd142409261fb48b7f82a3f");
}
public static String r7052() {
    return xorDecode("d0xZExZRFFlBe1lVWEZ6Vw5ZRjEVABQR", "394c65a418894f96b5fbaafedb421acb");
}
public static String r7053() {
    return xorDecode("dRBbFBQHEFVAcQVcWER1BVRURH0IVQ==", "1e6d4ce802d04d6d88d8f1120f84c0e7");
}
public static String r7054() {
    return xorDecode("IUdfFRIAEw8WdAdcWBYhAg1UFXNBAQMTTFhcXRUT", "e22e2dfbf7f046bca8569bfc813383fb");
}
public static String r7055() {
    return xorDecode("chZfRxcLVw0AQydHWRQYAVwXIRRaExBXESkLRBILEE0WJkpUUhJNCgpfThI=", "6c277b9ce1c24d8f9ccf5dc2cab7fdb4");
}
public static String r7056() {
    return xorDecode("cUIOQkNbWlxXRCFFWUNGFVAFRRBVR0kXYEEARkI=", "57c2c24226e043fb8d1c479735a4674d");
}
public static String r7057() {
    return xorDecode("dRBdRBBeC1lVFiJEWUBCRQlVQkIAQxJEc1hc", "1e0407e70df140b2a461a3bd66866707");
}
public static String r7058() {
    return xorDecode("JREMFBFaDw9WFH0XWUAQcQAGVlcNDFMUZ0BTRkw=", "adad13aa3f9b4007ae35bc8444248312");
}
public static String r7059() {
    return xorDecode("IUUJRUNfXQ9UEXFBWUcTf1kBV1FbWAgYd1pX", "e0d5c63a1c5447398b2347c82430d303");
}
public static String r7060() {
    return xorDecode("JkxZSBBYDVtWQSBEWRRFYxYIFhZVFBQ2R1RAFw==", "b94801c533d14de7aabb0f4e352c36c5");
}
public static String r7061() {
    return xorDecode("JUVcRRRdVlgERSFEWRYVbRVaRhAAR0N9CgA=", "a0154486a7e14f59b32de5c8ddc73931");
}
public static String r7062() {
    return xorDecode("IEdeQRJdCg9RQyUXWUUYewMNUVgQNhUFQkQ=", "d23124da41ab4580bf070ead00340789");
}
public static String r7063() {
    return xorDecode("fRNeFkRRX1ldRnJDWREQLlhdA1gYJl4H", "9f3fd81784664a0e96b78c0ca885c686");
}
public static String r7064() {
    return xorDecode("JkVeQxcIXVdTQHUWWUIRYlBXV0IWZEQDEEE=", "b0337a39621c42149520670bb5a30786");
}
public static String r7068() {
    return xorDecode("JkcMRxRbC1hWS3ATWUQRZAgDVxREJlkA", "b2a742e6394f4412aa2fdc7d0bf424e4");
}
public static String r7069() {
    return xorDecode("cBBaQRIMWVoGEXFBWUNBZVNKEl1BakZTExU=", "4e712e74cc5443a683b8a922aacba6f4");
}
public static String r7070() {
    return xorDecode("J0YMEkUID1oDQyJAWUZCMFNAFVZBIAtW", "c3abeaa4f1f546bc89e3aee2caee5ce1");
}
public static String r7071() {
    return xorDecode("ckJZQxMHEQ4RLS8WFHBAWgRAFQgJChQY", "67433cdcadb64589a0aafd9851d3f63b");
}
public static String r7072() {
    return xorDecode("JU1ZFEJaXQgBRnURWUgUX1hQXxlmFVhLEQ==", "a84db33fd41d484299395a99eea5a612");
}
public static String r7073() {
    return xorDecode("JRMOFRBeC1ZcRSERWUlCCwAPWhEgX1c=", "afce07e897ed49bfaf61e13c43f6021e");
}
public static String r7074() {
    return xorDecode("ck1bFhUIXF8HQCIUWUJFAExfQHVRCFhLRCEeVlZCEF9ZVhtG", "686f5a21b2fa42ed92080a48ddf532d6");
}
public static String r7075() {
    return xorDecode("c0NdRUNcRQtFc1ZbVVwGXUoSJlgIXVtSVxNDZkVVRRU=", "7605c80f507742b882e9d8566ac5147a");
}
public static String r7076() {
    return xorDecode("fBMMQRNVQQ5FdgIJVVcGVEsVdldcVlcAB0YQfVoG", "8fa1314c55ce49b19556039df4084b76");
}
public static String r7077() {
    return xorDecode("cRcPRBdXQw9JJVBZVV1dVBMVegBYXQpVB0AZIU9QVBRBCw1aGhM=", "5bb4736b9f154391a59a48d1f29d731d");
}
/** Reflective call on Throwable; target method name is decoded at runtime by rxduNsPvPgoUKrfa6147(). */
public static String rxduNsPvPgoUKrfa(Throwable th) {
    return (String) Throwable.class.getMethod(rxduNsPvPgoUKrfa6147(), new Class[0]).invoke(th, new Object[0]);
}
public static String rxduNsPvPgoUKrfa6147() {
    return xorDecode("X1BGfQMXQwBRXQ==", "8520fd0a68244dccbcf23fd3483f985d");
}
public static String sBydyj2616() {
    return xorDecode("JU1VQRlFBxQDDBNYcEJcQHEIIypVQWVYRgdXUA==", "a88195bfeca547108fbd06104b64702a");
}
/** Reflective static call on project class q(String); name decoded by sOeMMrgjRmAScnyt4091(). */
public static void sOeMMrgjRmAScnyt(String str) {
    q.class.getMethod(sOeMMrgjRmAScnyt4091(), String.class).invoke(null, str);
}
public static String sOeMMrgjRmAScnyt4091() {
    return xorDecode("AA==", "aec4b763c6374632b2007a07a9bb87ad");
}
public static void sWfUDpswUHIemxaa(String str) {
    q.class.getMethod(sWfUDpswUHIemxaa6527(), String.class).invoke(null, str);
}
public static String sWfUDpswUHIemxaa6527() {
    return xorDecode("Ag==", "cd98bb4f6fed45139dd3291497994ead");
}
/** Reflective StringBuilder call (presumably append(String) -- TODO confirm by decoding). */
public static StringBuilder syuZgbszIJJJJJYovEAi(StringBuilder sb, String str) {
    return (StringBuilder) StringBuilder.class.getMethod(syuZgbszIJJJJJYovEAi6994(), String.class).invoke(sb, str);
}
public static String syuZgbszIJJJJJYovEAi6994() {
    return xorDecode("B0VFVFwB", "f5512e624b764e2ba36f42f8ad29f30e");
}
public static void tCGrMtMGaZmxPQui(String str) {
    q.class.getMethod(tCGrMtMGaZmxPQui6864(), String.class).invoke(null, str);
}
public static String tCGrMtMGaZmxPQui6864() {
    return xorDecode("AA==", "a9b3fda070c94299ae3f3b9a9a4f2c1f");
}
public static StringBuilder tikIeReYfVyAPNwS(StringBuilder sb, String str) {
    return (StringBuilder) StringBuilder.class.getMethod(tikIeReYfVyAPNwS9370(), String.class).invoke(sb, str);
}
public static String tikIeReYfVyAPNwS9370() {
    return xorDecode("UENAVAgC", "1301ff86d6c24743835b5320a92e5030");
}
public static String uVOyMYqJJJJJZRGGNzDS(StringBuilder sb) {
    return (String) StringBuilder.class.getMethod(uVOyMYqJJJJJZRGGNzDS6728(), new Class[0]).invoke(sb, new Object[0]);
}
public static String uVOyMYqJJJJJZRGGNzDS6728() {
    return xorDecode("FQ4yEhRdWgM=", "aaaff44dc7a74ee38fb2e23e23b33621");
}
/** Reflective static call on project class ICiHusuyluh(Context, XmlSerializer, StringWriter). */
public static void uZZfumAqaQTgGnqR(Context context, XmlSerializer xmlSerializer, StringWriter stringWriter) {
    ICiHusuyluh.class.getMethod(uZZfumAqaQTgGnqR9107(), Context.class, XmlSerializer.class, StringWriter.class).invoke(null, context, xmlSerializer, stringWriter);
}
public static String uZZfumAqaQTgGnqR9107() {
    return xorDecode("Ag==", "c927b3cd92cd434ca714044276db2217");
}
public static void ubZKjlRyiqNRurOy(String str, Throwable th) {
    q.class.getMethod(ubZKjlRyiqNRurOy8073(), String.class, Throwable.class).invoke(null, str, th);
}
public static String ubZKjlRyiqNRurOy8073() {
    return xorDecode("VQ==", "4388552c27014f39bf4cb12e1b7cdd85");
}
public static String ukPaOapnovJJJJJonSvc(Throwable th) {
    return (String) Throwable.class.getMethod(ukPaOapnovJJJJJonSvc9865(), new Class[0]).invoke(th, new Object[0]);
}
public static String ukPaOapnovJJJJJonSvc9865() {
    return xorDecode("AVcQfFdCFwVTXA==", "f2d121dd4995472eadc21f66770a6665");
}
/** Reflective Context call returning a ContentResolver (presumably getContentResolver -- TODO confirm). */
public static ContentResolver uoIUHWnbwXuQJQXK(Context context) {
    return (ContentResolver) Context.class.getMethod(uoIUHWnbwXuQJQXK7314(), new Class[0]).invoke(context, new Object[0]);
}
public static String uoIUHWnbwXuQJQXK7314() {
    return xorDecode("AQRNdlpfF11bTWJTR10ORARF", "fa9551c8590642b2a71fa0021d9a5fb5");
}
public static void vepkEJkaOTZUIqFU(XmlSerializer xmlSerializer, StringWriter stringWriter) {
    ICiHusuyluh.class.getMethod(vepkEJkaOTZUIqFU7352(), XmlSerializer.class, StringWriter.class).invoke(null, xmlSerializer, stringWriter);
}
public static String vepkEJkaOTZUIqFU7352() {
    return xorDecode("VA==", "512635c206384fdd97be77359f97a995");
}
public static void vgZvahDGGWyAvMiZ(String str) {
    q.class.getMethod(vgZvahDGGWyAvMiZ6124(), String.class).invoke(null, str);
}
public static String vgZvahDGGWyAvMiZ6124() {
    return xorDecode("Aw==", "b05f652cdbce41c89e9111fe048f4ad1");
}
public static void vnMeUDIGPWEwtaFF(String str, Throwable th) {
    q.class.getMethod(vnMeUDIGPWEwtaFF9680(), String.class, Throwable.class).invoke(null, str, th);
}
public static String vnMeUDIGPWEwtaFF9680() {
    return xorDecode("Aw==", "ba0300fa4fff45808330fd3aabb4c48b");
}
public static void vnQbPyuJJJJJGqKOTBbR(String str) {
    q.class.getMethod(vnQbPyuJJJJJGqKOTBbR6678(), String.class).invoke(null, str);
}
public static String vnQbPyuJJJJJGqKOTBbR6678() {
    return xorDecode("UQ==", "07b9830e3c9b41be8f100e4207c48c0e");
}
public static StringBuilder wCbwFHVqmxHhhfIY(StringBuilder sb, String str) {
    return (StringBuilder) StringBuilder.class.getMethod(wCbwFHVqmxHhhfIY8469(), String.class).invoke(sb, str);
}
public static String wCbwFHVqmxHhhfIY8469() {
    return xorDecode("URRIUlZc", "0d878863038e4e7bae8c610792dc4bef");
}
public static void wHWBPgjTxteAFanA(String str) {
    q.class.getMethod(wHWBPgjTxteAFanA6471(), String.class).invoke(null, str);
}
public static String wHWBPgjTxteAFanA6471() {
    return xorDecode("Vw==", "6780c445a6f54d4aa581c0411e1fca8a");
}
public static void wVEMiqiPdHwrvbdn(Context context) {
    z.class.getMethod(wVEMiqiPdHwrvbdn3262(), Context.class).invoke(null, context);
}
public static String wVEMiqiPdHwrvbdn3262() {
    return xorDecode("UA==", "15af9cbefc984634908b886a147d7941");
}
/** Reflective Context call taking an Intent and returning a ComponentName (service start? -- TODO confirm). */
public static ComponentName wlKtqycyinerARTX(Context context, Intent intent) {
    return (ComponentName) Context.class.getMethod(wlKtqycyinerARTX9364(), Intent.class).invoke(context, intent);
}
public static String wlKtqycyinerARTX9364() {
    return xorDecode("REBTExYyB0cVWAJU", "742abab5c1a1480a962a6ee67f5bb7c3");
}
public static void wpDKWHEVYTDFwjTV(XmlSerializer xmlSerializer, ContentResolver contentResolver) {
    C.class.getMethod(wpDKWHEVYTDFwjTV4102(), XmlSerializer.class, ContentResolver.class).invoke(null, xmlSerializer, contentResolver);
}
public static String wpDKWHEVYTDFwjTV4102() {
    return xorDecode("Vg==", "2754cc8fee314729bb4fc9233dbbfb6d");
}
public static StringBuilder wzbSIfPlvwKcfKPl(StringBuilder sb, String str) {
    return (StringBuilder) StringBuilder.class.getMethod(wzbSIfPlvwKcfKPl4653(), String.class).invoke(sb, str);
}
public static String wzbSIfPlvwKcfKPl4653() {
    return xorDecode("UEYTBFZc", "16ca880789c34ce3afe505e2e3224346");
}
/** Reflective static call on android.util.Xml returning an XmlSerializer. */
public static XmlSerializer ybboCWORhkaKUozb() {
    return (XmlSerializer) Xml.class.getMethod(ybboCWORhkaKUozb2021(), new Class[0]).invoke(null, new Object[0]);
}
public static String ybboCWORhkaKUozb2021() {
    return xorDecode("WgRHNQFCCAQOWksGRg==", "4a0fd0aeb31c4ee4aec15df74c755ca5");
}
public static StringBuilder yhjvxDqyfOWGhBjC(StringBuilder sb, String str) {
    return (StringBuilder) StringBuilder.class.getMethod(yhjvxDqyfOWGhBjC7203(), String.class).invoke(sb, str);
}
public static String yhjvxDqyfOWGhBjC7203() {
    return xorDecode("VhMSV1tc", "7cb25860a1594332b1525a9d6acf9d52");
}
public static String zAvIBQumzrIjBZvr(StringBuilder sb) {
    return (String) StringBuilder.class.getMethod(zAvIBQumzrIjBZvr5874(), new Class[0]).invoke(sb, new Object[0]);
}
public static String zAvIBQumzrIjBZvr5874() {
    return xorDecode("RFk2QhcPXlA=", "06e6ef0786d94ccb9ff9662bbbb6b6b1");
}
public static void zddguzXhxvDfuMKl(XmlSerializer xmlSerializer, StringWriter stringWriter) {
    ICiHusuyluh.class.getMethod(zddguzXhxvDfuMKl9095(), XmlSerializer.class, StringWriter.class).invoke(null, xmlSerializer, stringWriter);
}
public static String zddguzXhxvDfuMKl9095() {
    return xorDecode("BQ==", "d452154689fa4b5e978a1b9d66808c89");
}
public static String zgJGHrkIDFJXxCra(StringBuilder sb) {
    return (String) StringBuilder.class.getMethod(zgJGHrkIDFJXxCra4937(), new Class[0]).invoke(sb, new Object[0]);
}
public static String zgJGHrkIDFJXxCra4937() {
    return xorDecode("RFZjQUpcXFE=", "090585267c5d469ea56233902cf6cd7c");
}
public static String zhENKRmBhftFkJMo(StringBuilder sb) {
    return (String) StringBuilder.class.getMethod(zhENKRmBhftFkJMo2065(), new Class[0]).invoke(sb, new Object[0]);
}
public static String zhENKRmBhftFkJMo2065() {
    return xorDecode("RFxiQBZQCFM=", "0314d9f409104ee39d6252f09e83d3ba");
}
public static Intent zmKZhlcKYDmCQiUQ(Intent intent, String str) {
    return (Intent) Intent.class.getMethod(zmKZhlcKYDmCQiUQ5918(), String.class).invoke(intent, str);
}
public static String zmKZhlcKYDmCQiUQ5918() {
    return xorDecode("ElVDIFsVXApW", "a07a8a5e84f54aecb65f035c29f0017c");
}
public static StringBuilder zyVwGEvyuOZFCiey(StringBuilder sb, int i) {
    return (StringBuilder) StringBuilder.class.getMethod(zyVwGEvyuOZFCiey9375(), Integer.TYPE).invoke(sb, Integer.valueOf(i));
}
public static String zyVwGEvyuOZFCiey9375() {
    return xorDecode("BxUUAVxX", "fedd23379c3e4181b6b0df8d9bc19d9d");
}
}
| jonathandata1/pegasus_spyware | sample3/recompiled_java/sources/seC/dujmehn/Cutyq/v.java |
1,413 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jasper.compiler;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import jakarta.el.ExpressionFactory;
import jakarta.servlet.jsp.tagext.TagLibraryInfo;
import org.apache.jasper.Constants;
import org.apache.jasper.JasperException;
import org.apache.jasper.JspCompilationContext;
/**
* A repository for various info about the translation unit under compilation.
*
* @author Kin-man Chung
*/
class PageInfo {
// --- Translation-unit state --------------------------------------------
// Standard + page-directive imports emitted into the generated servlet.
private final List<String> imports;
// Resources this page depends on, mapped to their last-modified time.
private final Map<String,Long> dependants;
private final BeanRepository beanRepository;
private final Set<String> varInfoNames;
// uri -> TagLibraryInfo for every taglib used by this translation unit.
private final HashMap<String,TagLibraryInfo> taglibsMap;
// prefix -> uri mappings from JSP-syntax taglib directives.
private final HashMap<String, String> jspPrefixMapper;
// prefix -> stack of uris for XML-syntax namespace scoping (push/pop).
private final HashMap<String, Deque<String>> xmlPrefixMapper;
private final HashMap<String, Mark> nonCustomTagPrefixMap;
private final String jspFile;
private static final String defaultLanguage = "java";
private String language;
private final String defaultExtends;
private String xtends;
private String contentType = null;
private String session;
private boolean isSession = true;
private String bufferValue;
// Default page buffer: 8KB, per the JSP specification.
private int buffer = 8*1024;
private String autoFlush;
private boolean isAutoFlush = true;
private String isErrorPageValue;
private boolean isErrorPage = false;
private String errorPage = null;
private String info;
private boolean scriptless = false;
private boolean scriptingInvalid = false;
private String isELIgnoredValue;
private boolean isELIgnored = false;
// JSP 2.1
private String deferredSyntaxAllowedAsLiteralValue;
private boolean deferredSyntaxAllowedAsLiteral = false;
private final ExpressionFactory expressionFactory =
ExpressionFactory.newInstance();
private String trimDirectiveWhitespacesValue;
private boolean trimDirectiveWhitespaces = false;
private String omitXmlDecl = null;
private String doctypeName = null;
private String doctypePublic = null;
private String doctypeSystem = null;
private boolean isJspPrefixHijacked;
// Set of all element and attribute prefixes used in this translation unit
private final HashSet<String> prefixes;
private boolean hasJspRoot = false;
private Collection<String> includePrelude;
private Collection<String> includeCoda;
private final List<String> pluginDcls; // Id's for tagplugin declarations
// JSP 2.2
private boolean errorOnUndeclaredNamespace = false;
// JSP 3.1
private String errorOnELNotFoundValue;
private boolean errorOnELNotFound = false;
private final boolean isTagFile;
/**
 * Creates a PageInfo for the translation unit identified by the given
 * compilation context, seeding the import list with the standard imports.
 */
PageInfo(BeanRepository beanRepository, JspCompilationContext ctxt) {
isTagFile = ctxt.isTagFile();
jspFile = ctxt.getJspFile();
defaultExtends = ctxt.getOptions().getJspServletBase();
this.beanRepository = beanRepository;
this.varInfoNames = new HashSet<>();
this.taglibsMap = new HashMap<>();
this.jspPrefixMapper = new HashMap<>();
this.xmlPrefixMapper = new HashMap<>();
this.nonCustomTagPrefixMap = new HashMap<>();
this.dependants = new HashMap<>();
this.includePrelude = new ArrayList<>();
this.includeCoda = new ArrayList<>();
this.pluginDcls = new ArrayList<>();
this.prefixes = new HashSet<>();
// Enter standard imports
this.imports = new ArrayList<>(Constants.STANDARD_IMPORTS);
}
/** @return true if this translation unit is a tag file rather than a JSP page. */
public boolean isTagFile() {
return isTagFile;
}
/**
* Check if the plugin ID has been previously declared.  Make a note
* that this Id is now declared.
*
* @param id The plugin ID to check
*
* @return true if Id has been declared.
*/
public boolean isPluginDeclared(String id) {
if (pluginDcls.contains(id)) {
return true;
}
// Side effect: first query for an id records it as declared.
pluginDcls.add(id);
return false;
}
/** Adds all the given imports to the generated servlet's import list. */
public void addImports(List<String> imports) {
this.imports.addAll(imports);
}
/** Adds a single import to the generated servlet's import list. */
public void addImport(String imp) {
this.imports.add(imp);
}
/** @return the live (mutable) list of imports, standard imports included. */
public List<String> getImports() {
return imports;
}
/** @return the context-relative path of the JSP file under compilation. */
public String getJspFile() {
return jspFile;
}
/**
 * Records a compile-time dependency (e.g. an included file or TLD) with
 * its last-modified time. The page itself is never its own dependant,
 * and the first recorded timestamp for a resource wins.
 */
public void addDependant(String d, Long lastModified) {
if (!dependants.containsKey(d) && !jspFile.equals(d)) {
dependants.put(d, lastModified);
}
}
/** @return map of dependency path to last-modified time. */
public Map<String,Long> getDependants() {
return dependants;
}
public BeanRepository getBeanRepository() {
return beanRepository;
}
/** Marks whether scripting elements are disallowed in this unit. */
public void setScriptless(boolean s) {
scriptless = s;
}
public boolean isScriptless() {
return scriptless;
}
public void setScriptingInvalid(boolean s) {
scriptingInvalid = s;
}
public boolean isScriptingInvalid() {
return scriptingInvalid;
}
/** Files implicitly included before the page (jsp-property-group prelude). */
public Collection<String> getIncludePrelude() {
return includePrelude;
}
public void setIncludePrelude(Collection<String> prelude) {
includePrelude = prelude;
}
/** Files implicitly included after the page (jsp-property-group coda). */
public Collection<String> getIncludeCoda() {
return includeCoda;
}
public void setIncludeCoda(Collection<String> coda) {
includeCoda = coda;
}
/** Records whether the page provides its own &lt;jsp:root&gt; element. */
public void setHasJspRoot(boolean s) {
hasJspRoot = s;
}
public boolean hasJspRoot() {
return hasJspRoot;
}
// --- jsp:output / XML-view settings ------------------------------------
public String getOmitXmlDecl() {
return omitXmlDecl;
}
public void setOmitXmlDecl(String omit) {
omitXmlDecl = omit;
}
public String getDoctypeName() {
return doctypeName;
}
public void setDoctypeName(String doctypeName) {
this.doctypeName = doctypeName;
}
public String getDoctypeSystem() {
return doctypeSystem;
}
public void setDoctypeSystem(String doctypeSystem) {
this.doctypeSystem = doctypeSystem;
}
public String getDoctypePublic() {
return doctypePublic;
}
public void setDoctypePublic(String doctypePublic) {
this.doctypePublic = doctypePublic;
}
/* Tag library and XML namespace management methods */
/** Records whether the "jsp" prefix has been bound to a non-standard URI. */
public void setIsJspPrefixHijacked(boolean isHijacked) {
isJspPrefixHijacked = isHijacked;
}
public boolean isJspPrefixHijacked() {
return isJspPrefixHijacked;
}
/*
* Adds the given prefix to the set of prefixes of this translation unit.
*
* @param prefix The prefix to add
*/
public void addPrefix(String prefix) {
prefixes.add(prefix);
}
/*
* Checks to see if this translation unit contains the given prefix.
*
* @param prefix The prefix to check
*
* @return true if this translation unit contains the given prefix, false
* otherwise
*/
public boolean containsPrefix(String prefix) {
return prefixes.contains(prefix);
}
/*
* Maps the given URI to the given tag library.
*
* @param uri The URI to map
* @param info The tag library to be associated with the given URI
*/
public void addTaglib(String uri, TagLibraryInfo info) {
taglibsMap.put(uri, info);
}
/*
* Gets the tag library corresponding to the given URI.
*
* @return Tag library corresponding to the given URI, or null if unmapped
*/
public TagLibraryInfo getTaglib(String uri) {
return taglibsMap.get(uri);
}
/*
* Gets the collection of tag libraries that are associated with a URI
*
* @return Collection of tag libraries that are associated with a URI
*/
public Collection<TagLibraryInfo> getTaglibs() {
return taglibsMap.values();
}
/*
* Checks to see if the given URI is mapped to a tag library.
*
* @param uri The URI to map
*
* @return true if the given URI is mapped to a tag library, false
* otherwise
*/
public boolean hasTaglib(String uri) {
return taglibsMap.containsKey(uri);
}
/*
* Maps the given prefix to the given URI.
*
* @param prefix The prefix to map
* @param uri The URI to be associated with the given prefix
*/
public void addPrefixMapping(String prefix, String uri) {
jspPrefixMapper.put(prefix, uri);
}
    /**
     * Pushes the given URI onto the stack of URIs to which the given prefix
     * is mapped.
     *
     * @param prefix The prefix whose stack of URIs is to be pushed
     * @param uri The URI to be pushed onto the stack (may be null)
     */
    public void pushPrefixMapping(String prefix, String uri) {
        // Must be LinkedList as it needs to accept nulls
        xmlPrefixMapper.computeIfAbsent(prefix, k -> new LinkedList<>()).addFirst(uri);
    }
    /**
     * Removes the URI at the top of the stack of URIs to which the given
     * prefix is mapped.
     *
     * <p>NOTE(review): assumes a matching {@link #pushPrefixMapping} was
     * performed earlier; a pop for a never-pushed prefix throws
     * {@code NullPointerException}. Callers apparently keep push/pop
     * balanced — confirm before changing.
     *
     * @param prefix The prefix whose stack of URIs is to be popped
     */
    public void popPrefixMapping(String prefix) {
        Deque<String> stack = xmlPrefixMapper.get(prefix);
        stack.removeFirst();
    }
/*
* Returns the URI to which the given prefix maps.
*
* @param prefix The prefix whose URI is sought
*
* @return The URI to which the given prefix maps
*/
public String getURI(String prefix) {
String uri = null;
Deque<String> stack = xmlPrefixMapper.get(prefix);
if (stack == null || stack.size() == 0) {
uri = jspPrefixMapper.get(prefix);
} else {
uri = stack.getFirst();
}
return uri;
}
/* Page/Tag directive attributes */
/*
* language
*/
public void setLanguage(String value, Node n, ErrorDispatcher err,
boolean pagedir)
throws JasperException {
if (!"java".equalsIgnoreCase(value)) {
if (pagedir) {
err.jspError(n, "jsp.error.page.language.nonjava");
} else {
err.jspError(n, "jsp.error.tag.language.nonjava");
}
}
language = value;
}
public String getLanguage(boolean useDefault) {
return (language == null && useDefault ? defaultLanguage : language);
}
    /**
     * Sets the value of the 'extends' page directive attribute.
     *
     * @param value The 'extends' attribute value
     */
    public void setExtends(String value) {
        xtends = value;
    }
/**
* Gets the value of the 'extends' page directive attribute.
*
* @param useDefault TRUE if the default
* (org.apache.jasper.runtime.HttpJspBase) should be returned if this
* attribute has not been set, FALSE otherwise
*
* @return The value of the 'extends' page directive attribute, or the
* default (org.apache.jasper.runtime.HttpJspBase) if this attribute has
* not been set and useDefault is TRUE
*/
public String getExtends(boolean useDefault) {
return (xtends == null && useDefault ? defaultExtends : xtends);
}
    /**
     * Gets the value of the 'extends' page directive attribute.
     *
     * @return The value of the 'extends' page directive attribute, or the
     * default (org.apache.jasper.runtime.HttpJspBase) if this attribute has
     * not been set
     *
     * @see #getExtends(boolean)
     */
    public String getExtends() {
        return getExtends(true);
    }
    /**
     * Sets the value of the 'contentType' directive attribute.
     *
     * @param value The 'contentType' attribute value
     */
    public void setContentType(String value) {
        contentType = value;
    }
    /** @return the value of the 'contentType' directive attribute */
    public String getContentType() {
        return contentType;
    }
/*
* buffer
*/
public void setBufferValue(String value, Node n, ErrorDispatcher err)
throws JasperException {
if ("none".equalsIgnoreCase(value)) {
buffer = 0;
} else {
if (value == null || !value.endsWith("kb")) {
if (n == null) {
err.jspError("jsp.error.page.invalid.buffer");
} else {
err.jspError(n, "jsp.error.page.invalid.buffer");
}
}
try {
@SuppressWarnings("null") // value can't be null here
int k = Integer.parseInt(value.substring(0, value.length()-2));
buffer = k * 1024;
} catch (NumberFormatException e) {
if (n == null) {
err.jspError("jsp.error.page.invalid.buffer");
} else {
err.jspError(n, "jsp.error.page.invalid.buffer");
}
}
}
bufferValue = value;
}
    /** @return the raw 'buffer' attribute value, e.g. "none" or "8kb" */
    public String getBufferValue() {
        return bufferValue;
    }
    /** @return the buffer size in bytes (0 when the attribute was "none") */
    public int getBuffer() {
        return buffer;
    }
/*
* session
*/
public void setSession(String value, Node n, ErrorDispatcher err)
throws JasperException {
if ("true".equalsIgnoreCase(value)) {
isSession = true;
} else if ("false".equalsIgnoreCase(value)) {
isSession = false;
} else {
err.jspError(n, "jsp.error.page.invalid.session");
}
session = value;
}
    /** @return the raw 'session' attribute value as set via {@link #setSession} */
    public String getSession() {
        return session;
    }
    /** @return the parsed boolean form of the 'session' attribute */
    public boolean isSession() {
        return isSession;
    }
/*
* autoFlush
*/
public void setAutoFlush(String value, Node n, ErrorDispatcher err)
throws JasperException {
if ("true".equalsIgnoreCase(value)) {
isAutoFlush = true;
} else if ("false".equalsIgnoreCase(value)) {
isAutoFlush = false;
} else {
err.jspError(n, "jsp.error.autoFlush.invalid");
}
autoFlush = value;
}
    /** @return the raw 'autoFlush' attribute value as set via {@link #setAutoFlush} */
    public String getAutoFlush() {
        return autoFlush;
    }
    /** @return the parsed boolean form of the 'autoFlush' attribute */
    public boolean isAutoFlush() {
        return isAutoFlush;
    }
    /**
     * Sets the value of the 'info' directive attribute.
     *
     * @param value The 'info' attribute value
     */
    public void setInfo(String value) {
        info = value;
    }
    /** @return the value of the 'info' directive attribute */
    public String getInfo() {
        return info;
    }
    /**
     * Sets the value of the 'errorPage' directive attribute.
     *
     * @param value The 'errorPage' attribute value
     */
    public void setErrorPage(String value) {
        errorPage = value;
    }
    /** @return the value of the 'errorPage' directive attribute */
    public String getErrorPage() {
        return errorPage;
    }
/*
* isErrorPage
*/
public void setIsErrorPage(String value, Node n, ErrorDispatcher err)
throws JasperException {
if ("true".equalsIgnoreCase(value)) {
isErrorPage = true;
} else if ("false".equalsIgnoreCase(value)) {
isErrorPage = false;
} else {
err.jspError(n, "jsp.error.page.invalid.iserrorpage");
}
isErrorPageValue = value;
}
    /** @return the raw 'isErrorPage' attribute value as set via {@link #setIsErrorPage} */
    public String getIsErrorPage() {
        return isErrorPageValue;
    }
    /** @return the parsed boolean form of the 'isErrorPage' attribute */
    public boolean isErrorPage() {
        return isErrorPage;
    }
/*
* isELIgnored
*/
public void setIsELIgnored(String value, Node n, ErrorDispatcher err,
boolean pagedir)
throws JasperException {
if ("true".equalsIgnoreCase(value)) {
isELIgnored = true;
} else if ("false".equalsIgnoreCase(value)) {
isELIgnored = false;
} else {
if (pagedir) {
err.jspError(n, "jsp.error.page.invalid.iselignored");
} else {
err.jspError(n, "jsp.error.tag.invalid.iselignored");
}
}
isELIgnoredValue = value;
}
/*
* errorOnELNotFound
*/
public void setErrorOnELNotFound(String value, Node n, ErrorDispatcher err,
boolean pagedir)
throws JasperException {
if ("true".equalsIgnoreCase(value)) {
errorOnELNotFound = true;
} else if ("false".equalsIgnoreCase(value)) {
errorOnELNotFound = false;
} else {
if (pagedir) {
err.jspError(n, "jsp.error.page.invalid.errorOnELNotFound");
} else {
err.jspError(n, "jsp.error.tag.invalid.errorOnELNotFound");
}
}
errorOnELNotFoundValue = value;
}
/*
* deferredSyntaxAllowedAsLiteral
*/
public void setDeferredSyntaxAllowedAsLiteral(String value, Node n, ErrorDispatcher err,
boolean pagedir)
throws JasperException {
if ("true".equalsIgnoreCase(value)) {
deferredSyntaxAllowedAsLiteral = true;
} else if ("false".equalsIgnoreCase(value)) {
deferredSyntaxAllowedAsLiteral = false;
} else {
if (pagedir) {
err.jspError(n, "jsp.error.page.invalid.deferredsyntaxallowedasliteral");
} else {
err.jspError(n, "jsp.error.tag.invalid.deferredsyntaxallowedasliteral");
}
}
deferredSyntaxAllowedAsLiteralValue = value;
}
/*
* trimDirectiveWhitespaces
*/
public void setTrimDirectiveWhitespaces(String value, Node n, ErrorDispatcher err,
boolean pagedir)
throws JasperException {
if ("true".equalsIgnoreCase(value)) {
trimDirectiveWhitespaces = true;
} else if ("false".equalsIgnoreCase(value)) {
trimDirectiveWhitespaces = false;
} else {
if (pagedir) {
err.jspError(n, "jsp.error.page.invalid.trimdirectivewhitespaces");
} else {
err.jspError(n, "jsp.error.tag.invalid.trimdirectivewhitespaces");
}
}
trimDirectiveWhitespacesValue = value;
}
    /**
     * Directly sets the parsed isELIgnored flag (bypasses string validation).
     *
     * @param s the new flag value
     */
    public void setELIgnored(boolean s) {
        isELIgnored = s;
    }
    /** @return the raw 'isELIgnored' attribute value as set via {@link #setIsELIgnored} */
    public String getIsELIgnored() {
        return isELIgnoredValue;
    }
    /** @return the parsed boolean form of the 'isELIgnored' attribute */
    public boolean isELIgnored() {
        return isELIgnored;
    }
    /**
     * Directly sets the parsed errorOnELNotFound flag (bypasses string
     * validation).
     *
     * @param s the new flag value
     */
    public void setErrorOnELNotFound(boolean s) {
        errorOnELNotFound = s;
    }
    /** @return the raw 'errorOnELNotFound' attribute value */
    public String getErrorOnELNotFound() {
        return errorOnELNotFoundValue;
    }
    /** @return the parsed boolean form of the 'errorOnELNotFound' attribute */
    public boolean isErrorOnELNotFound() {
        return errorOnELNotFound;
    }
    /**
     * Records the location where the given prefix was used for something
     * other than a custom tag.
     *
     * @param prefix The prefix
     * @param where The source location of the non-custom-tag usage
     */
    public void putNonCustomTagPrefix(String prefix, Mark where) {
        nonCustomTagPrefixMap.put(prefix, where);
    }
    /**
     * @param prefix The prefix to look up
     * @return the recorded location of the non-custom-tag usage of the
     * given prefix, or {@code null} if none was recorded
     */
    public Mark getNonCustomTagPrefix(String prefix) {
        return nonCustomTagPrefixMap.get(prefix);
    }
    /** @return the raw 'deferredSyntaxAllowedAsLiteral' attribute value */
    public String getDeferredSyntaxAllowedAsLiteral() {
        return deferredSyntaxAllowedAsLiteralValue;
    }
    /** @return the parsed boolean form of the 'deferredSyntaxAllowedAsLiteral' attribute */
    public boolean isDeferredSyntaxAllowedAsLiteral() {
        return deferredSyntaxAllowedAsLiteral;
    }
    /**
     * Directly sets the parsed deferredSyntaxAllowedAsLiteral flag
     * (bypasses string validation).
     *
     * @param isELDeferred the new flag value
     */
    public void setDeferredSyntaxAllowedAsLiteral(boolean isELDeferred) {
        this.deferredSyntaxAllowedAsLiteral = isELDeferred;
    }
    /** @return the EL expression factory associated with this translation unit */
    public ExpressionFactory getExpressionFactory() {
        return expressionFactory;
    }
    /** @return the raw 'trimDirectiveWhitespaces' attribute value */
    public String getTrimDirectiveWhitespaces() {
        return trimDirectiveWhitespacesValue;
    }
    /** @return the parsed boolean form of the 'trimDirectiveWhitespaces' attribute */
    public boolean isTrimDirectiveWhitespaces() {
        return trimDirectiveWhitespaces;
    }
    /**
     * Directly sets the parsed trimDirectiveWhitespaces flag (bypasses
     * string validation).
     *
     * @param trimDirectiveWhitespaces the new flag value
     */
    public void setTrimDirectiveWhitespaces(boolean trimDirectiveWhitespaces) {
        this.trimDirectiveWhitespaces = trimDirectiveWhitespaces;
    }
    /** @return the set of variable names tracked for this translation unit */
    public Set<String> getVarInfoNames() {
        return varInfoNames;
    }
    /** @return TRUE if an undeclared namespace should be treated as an error */
    public boolean isErrorOnUndeclaredNamespace() {
        return errorOnUndeclaredNamespace;
    }
    /**
     * Sets whether an undeclared namespace should be treated as an error.
     *
     * @param errorOnUndeclaredNamespace the new flag value
     */
    public void setErrorOnUndeclaredNamespace(
            boolean errorOnUndeclaredNamespace) {
        this.errorOnUndeclaredNamespace = errorOnUndeclaredNamespace;
    }
}
| apache/tomcat | java/org/apache/jasper/compiler/PageInfo.java |
1,414 |
package mage.cards.h;
import java.util.UUID;
import mage.abilities.effects.Effect;
import mage.abilities.effects.common.UntapTargetEffect;
import mage.abilities.effects.common.continuous.GainAbilityTargetEffect;
import mage.abilities.effects.common.continuous.GainControlTargetEffect;
import mage.abilities.keyword.HasteAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.Duration;
import mage.filter.StaticFilters;
import mage.target.TargetPermanent;
/**
*
* @author LevelX2
*/
public final class Hijack extends CardImpl {

    public Hijack(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[]{CardType.SORCERY}, "{1}{R}{R}");

        // Gain control of target artifact or creature until end of turn. Untap it. It gains haste until end of turn.
        this.getSpellAbility().addTarget(new TargetPermanent(StaticFilters.FILTER_PERMANENT_ARTIFACT_OR_CREATURE));
        this.getSpellAbility().addEffect(new GainControlTargetEffect(Duration.EndOfTurn));

        Effect untapEffect = new UntapTargetEffect();
        untapEffect.setText("Untap it");
        this.getSpellAbility().addEffect(untapEffect);

        Effect hasteEffect = new GainAbilityTargetEffect(HasteAbility.getInstance(), Duration.EndOfTurn);
        hasteEffect.setText("It gains haste until end of turn");
        this.getSpellAbility().addEffect(hasteEffect);
    }

    private Hijack(final Hijack card) {
        super(card);
    }

    @Override
    public Hijack copy() {
        // Copy constructor, required by the framework's copy() contract
        return new Hijack(this);
    }
}
| ElchananHaas/mage | Mage.Sets/src/mage/cards/h/Hijack.java |
1,415 | // Copyright 2000-2021 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.vcs.changes.ui;
import com.intellij.ide.FileSelectInContext;
import com.intellij.ide.SelectInContext;
import com.intellij.ide.dnd.DnDAware;
import com.intellij.ide.util.PsiNavigationSupport;
import com.intellij.openapi.actionSystem.*;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.fileChooser.actions.VirtualFileDeleteProvider;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.openapi.vcs.FilePath;
import com.intellij.openapi.vcs.FileStatus;
import com.intellij.openapi.vcs.VcsDataKeys;
import com.intellij.openapi.vcs.changes.*;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.ui.PopupHandler;
import com.intellij.util.containers.JBIterable;
import com.intellij.util.ui.tree.TreeUtil;
import com.intellij.vcs.commit.EditedCommitNode;
import com.intellij.vcsUtil.VcsUtil;
import org.jetbrains.annotations.ApiStatus;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.tree.DefaultMutableTreeNode;
import javax.swing.tree.DefaultTreeModel;
import javax.swing.tree.TreeNode;
import javax.swing.tree.TreePath;
import java.awt.*;
import java.awt.event.MouseEvent;
import java.util.List;
import java.util.Objects;
import static com.intellij.openapi.vcs.changes.ChangesUtil.getNavigatableArray;
import static com.intellij.openapi.vcs.changes.ui.ChangesBrowserNode.*;
// TODO: Check if we could extend DnDAwareTree here instead of directly implementing DnDAware
public abstract class ChangesListView extends ChangesTree implements DataProvider, DnDAware {
  private static final Logger LOG = Logger.getInstance(ChangesListView.class);

  @NonNls public static final String HELP_ID = "ideaInterface.changes";
  @NonNls public static final DataKey<ChangesListView> DATA_KEY
    = DataKey.create("ChangeListView");
  @NonNls public static final DataKey<Iterable<FilePath>> UNVERSIONED_FILE_PATHS_DATA_KEY
    = DataKey.create("ChangeListView.UnversionedFiles");
  @NonNls public static final DataKey<Iterable<VirtualFile>> EXACTLY_SELECTED_FILES_DATA_KEY
    = DataKey.create("ChangeListView.ExactlySelectedFiles");
  @NonNls public static final DataKey<Iterable<FilePath>> IGNORED_FILE_PATHS_DATA_KEY
    = DataKey.create("ChangeListView.IgnoredFiles");
  @NonNls public static final DataKey<List<FilePath>> MISSING_FILES_DATA_KEY
    = DataKey.create("ChangeListView.MissingFiles");
  @NonNls public static final DataKey<List<LocallyDeletedChange>> LOCALLY_DELETED_CHANGES
    = DataKey.create("ChangeListView.LocallyDeletedChanges");

  // TRUE while a background update is in progress; suppresses the empty text.
  private boolean myBusy = false;

  public ChangesListView(@NotNull Project project, boolean showCheckboxes) {
    super(project, showCheckboxes, true);
    // setDragEnabled throws an exception in headless mode which leads to a memory leak
    if (!ApplicationManager.getApplication().isHeadlessEnvironment()) {
      setDragEnabled(true);
    }
  }

  @Override
  public int getToggleClickCount() {
    // Require a double click to toggle a node.
    return 2;
  }

  @Override
  public void setPaintBusy(boolean paintBusy) {
    myBusy = paintBusy;
    super.setPaintBusy(paintBusy);
  }

  @Override
  protected boolean isEmptyTextVisible() {
    // Hide the "nothing here" text while the tree is still loading.
    return super.isEmptyTextVisible() && !myBusy;
  }

  @Override
  protected boolean isInclusionVisible(@NotNull ChangesBrowserNode<?> node) {
    // Checkboxes are shown only under non-empty changelists and under the
    // non-empty "Unversioned files" group.
    ChangesBrowserNode<?> subtreeRoot = getSubtreeRoot(node);
    Object subtreeRootObject = subtreeRoot != null ? subtreeRoot.getUserObject() : null;

    if (subtreeRootObject instanceof LocalChangeList localChangeList) return !localChangeList.getChanges().isEmpty();
    if (subtreeRootObject == UNVERSIONED_FILES_TAG && subtreeRoot.getChildCount() > 0) return true;
    return false;
  }

  // Returns the node's top-level ancestor (direct child of the root), or null for the root itself.
  private static @Nullable ChangesBrowserNode<?> getSubtreeRoot(@NotNull ChangesBrowserNode<?> node) {
    TreeNode[] path = node.getPath();
    if (path.length < 2) return null;
    return (ChangesBrowserNode<?>)path[1];
  }

  @Override
  public DefaultTreeModel getModel() {
    return (DefaultTreeModel)super.getModel();
  }

  @Override
  public void rebuildTree() {
    // currently not used in ChangesListView code flow
    LOG.warn("rebuildTree() not implemented in " + this, new Throwable());
  }

  @Override
  @ApiStatus.Internal
  public void updateTreeModel(@NotNull DefaultTreeModel model,
                              @NotNull TreeStateStrategy treeStateStrategy) {
    super.updateTreeModel(model, treeStateStrategy);
  }

  /**
   * Fast (EDT-safe) data lookups; expensive lookups are deferred to
   * {@link #getSlowData} via BGT_DATA_PROVIDER.
   */
  @Nullable
  @Override
  public Object getData(@NotNull String dataId) {
    if (DATA_KEY.is(dataId)) {
      return this;
    }
    if (VcsDataKeys.CHANGES.is(dataId)) {
      return getSelectedChanges()
        .toArray(Change.EMPTY_CHANGE_ARRAY);
    }
    if (VcsDataKeys.CHANGE_LEAD_SELECTION.is(dataId)) {
      return VcsTreeModelData.exactlySelected(this)
        .iterateUserObjects(Change.class)
        .toArray(Change.EMPTY_CHANGE_ARRAY);
    }
    if (VcsDataKeys.CHANGE_LISTS.is(dataId)) {
      return VcsTreeModelData.exactlySelected(this)
        .iterateRawUserObjects(ChangeList.class)
        .toList().toArray(ChangeList[]::new);
    }
    if (VcsDataKeys.FILE_PATHS.is(dataId)) {
      return VcsTreeModelData.mapToFilePath(VcsTreeModelData.selected(this));
    }
    if (PlatformDataKeys.DELETE_ELEMENT_PROVIDER.is(dataId)) {
      // don't try to delete files when only a changelist node is selected
      boolean hasSelection = VcsTreeModelData.exactlySelected(this)
        .iterateRawUserObjects()
        .filter(userObject -> !(userObject instanceof ChangeList))
        .isNotEmpty();
      return hasSelection
             ? new VirtualFileDeleteProvider()
             : null;
    }
    if (UNVERSIONED_FILE_PATHS_DATA_KEY.is(dataId)) {
      return getSelectedUnversionedFiles();
    }
    if (IGNORED_FILE_PATHS_DATA_KEY.is(dataId)) {
      return getSelectedIgnoredFiles();
    }
    if (VcsDataKeys.MODIFIED_WITHOUT_EDITING_DATA_KEY.is(dataId)) {
      return getSelectedModifiedWithoutEditing().toList();
    }
    if (LOCALLY_DELETED_CHANGES.is(dataId)) {
      return getSelectedLocallyDeletedChanges().toList();
    }
    if (MISSING_FILES_DATA_KEY.is(dataId)) {
      return getSelectedLocallyDeletedChanges()
        .map(LocallyDeletedChange::getPath)
        .toList();
    }
    if (PlatformCoreDataKeys.HELP_ID.is(dataId)) {
      return HELP_ID;
    }
    if (PlatformCoreDataKeys.BGT_DATA_PROVIDER.is(dataId)) {
      // Capture the selection now; resolve the slow keys on a background thread.
      DataProvider superProvider = (DataProvider)super.getData(dataId);
      VcsTreeModelData treeSelection = VcsTreeModelData.selected(this);
      VcsTreeModelData exactSelection = VcsTreeModelData.exactlySelected(this);
      return CompositeDataProvider.compose(slowId -> getSlowData(myProject, treeSelection, exactSelection, slowId), superProvider);
    }
    return super.getData(dataId);
  }

  // Resolves data keys that may require file-system access; called off the EDT.
  @Nullable
  private static Object getSlowData(@NotNull Project project,
                                    @NotNull VcsTreeModelData treeSelection,
                                    @NotNull VcsTreeModelData exactSelection,
                                    @NotNull String slowId) {
    if (SelectInContext.DATA_KEY.is(slowId)) {
      VirtualFile file = VcsTreeModelData.mapObjectToVirtualFile(exactSelection.iterateRawUserObjects()).first();
      if (file == null) return null;
      return new FileSelectInContext(project, file, null);
    }
    else if (CommonDataKeys.VIRTUAL_FILE_ARRAY.is(slowId)) {
      return VcsTreeModelData.mapToVirtualFile(treeSelection)
        .toArray(VirtualFile.EMPTY_ARRAY);
    }
    if (VcsDataKeys.VIRTUAL_FILES.is(slowId)) {
      return VcsTreeModelData.mapToVirtualFile(treeSelection);
    }
    if (CommonDataKeys.NAVIGATABLE.is(slowId)) {
      // Navigate only when exactly one non-directory file is selected.
      VirtualFile file = VcsTreeModelData.mapToNavigatableFile(treeSelection).single();
      return file != null && !file.isDirectory()
             ? PsiNavigationSupport.getInstance().createNavigatable(project, file, 0)
             : null;
    }
    if (CommonDataKeys.NAVIGATABLE_ARRAY.is(slowId)) {
      return getNavigatableArray(project, VcsTreeModelData.mapToNavigatableFile(treeSelection));
    }
    if (EXACTLY_SELECTED_FILES_DATA_KEY.is(slowId)) {
      return VcsTreeModelData.mapToExactVirtualFile(exactSelection);
    }
    return null;
  }

  @NotNull
  public JBIterable<FilePath> getUnversionedFiles() {
    return VcsTreeModelData.allUnderTag(this, UNVERSIONED_FILES_TAG)
      .iterateUserObjects(FilePath.class);
  }

  @NotNull
  public JBIterable<FilePath> getSelectedUnversionedFiles() {
    return VcsTreeModelData.selectedUnderTag(this, UNVERSIONED_FILES_TAG)
      .iterateUserObjects(FilePath.class);
  }

  @NotNull
  private JBIterable<FilePath> getSelectedIgnoredFiles() {
    return VcsTreeModelData.selectedUnderTag(this, IGNORED_FILES_TAG)
      .iterateUserObjects(FilePath.class);
  }

  @NotNull
  private JBIterable<VirtualFile> getSelectedModifiedWithoutEditing() {
    return VcsTreeModelData.selectedUnderTag(this, MODIFIED_WITHOUT_EDITING_TAG)
      .iterateUserObjects(VirtualFile.class)
      .filter(VirtualFile::isValid);
  }

  /**
   * Returns the selected changes, including synthetic "hijacked" changes
   * built from files modified without editing.
   */
  @NotNull
  public JBIterable<Change> getSelectedChanges() {
    JBIterable<Change> changes = VcsTreeModelData.selected(this)
      .iterateUserObjects(Change.class);
    JBIterable<Change> hijackedChanges = getSelectedModifiedWithoutEditing()
      .map(file -> toHijackedChange(myProject, file))
      .filterNotNull();

    return changes.append(hijackedChanges);
  }

  /**
   * Wraps a file modified without editing into a {@link Change} with
   * {@link FileStatus#HIJACKED}, or returns null when no VCS revision is
   * available for it.
   */
  @Nullable
  public static Change toHijackedChange(@NotNull Project project, @NotNull VirtualFile file) {
    VcsCurrentRevisionProxy before = VcsCurrentRevisionProxy.create(file, project);
    if (before != null) {
      ContentRevision afterRevision = new CurrentContentRevision(VcsUtil.getFilePath(file));
      return new Change(before, afterRevision, FileStatus.HIJACKED);
    }
    return null;
  }

  @NotNull
  private JBIterable<LocallyDeletedChange> getSelectedLocallyDeletedChanges() {
    return VcsTreeModelData.selectedUnderTag(this, LOCALLY_DELETED_NODE_TAG)
      .iterateUserObjects(LocallyDeletedChange.class);
  }

  /**
   * Returns all changes grouped with the given one: the whole tree when
   * changelists are disabled (or the single-default-changelist shortcut is
   * on), otherwise the enclosing changelist node's changes. Null when the
   * change is not in the tree.
   */
  @Nullable
  public List<Change> getAllChangesFromSameChangelist(@NotNull Change change) {
    ChangesBrowserNode<?> node = findNodeInTree(change);
    if (node == null) return null;

    ChangesBrowserNode<?> parent;
    if (Registry.is("vcs.skip.single.default.changelist") ||
        !ChangeListManager.getInstance(myProject).areChangeListsEnabled()) {
      parent = getRoot();
    }
    else {
      parent = findParentOfType(node, ChangesBrowserChangeListNode.class);
    }
    if (parent == null) return null;

    return parent.traverseObjectsUnder()
      .filter(Change.class)
      .toList();
  }

  /**
   * Returns all changes under the same amend-commit node as the given
   * change, or null if the change is not under one.
   */
  @Nullable
  public List<Change> getAllChangesFromSameAmendNode(@NotNull Change change) {
    ChangesBrowserNode<?> node = findNodeInTree(change);
    if (node == null) return null;

    ChangesBrowserNode<?> parent = findParentOfType(node, EditedCommitNode.class);
    if (parent == null) return null;

    return parent.traverseObjectsUnder()
      .filter(Change.class)
      .toList();
  }

  // Walks up the tree to the nearest ancestor of the given node type.
  @Nullable
  private static ChangesBrowserNode<?> findParentOfType(@NotNull ChangesBrowserNode<?> node,
                                                        @NotNull Class<? extends ChangesBrowserNode<?>> clazz) {
    ChangesBrowserNode<?> parent = node.getParent();
    while (parent != null) {
      if (clazz.isInstance(parent)) {
        return parent;
      }
      parent = parent.getParent();
    }
    return null;
  }

  @NotNull
  public JBIterable<ChangesBrowserChangeNode> getChangesNodes() {
    return VcsTreeModelData.all(this).iterateNodes().filter(ChangesBrowserChangeNode.class);
  }

  @NotNull
  public JBIterable<ChangesBrowserNode<?>> getSelectedChangesNodes() {
    return VcsTreeModelData.selected(this).iterateNodes();
  }

  @Override
  public void installPopupHandler(@NotNull ActionGroup group) {
    PopupHandler.installPopupMenu(this, group, ActionPlaces.CHANGES_VIEW_POPUP);
  }

  @Override
  @NotNull
  public JComponent getComponent() {
    return this;
  }

  @Override
  public void processMouseEvent(MouseEvent e) {
    super.processMouseEvent(e);
  }

  @Override
  public boolean isOverSelection(final Point point) {
    return TreeUtil.isOverSelection(this, point);
  }

  @Override
  public void dropSelectionButUnderPoint(final Point point) {
    TreeUtil.dropSelectionButUnderPoint(this, point);
  }

  @Nullable
  public ChangesBrowserNode<?> findNodeInTree(@Nullable Object userObject) {
    return findNodeInTree(userObject, null);
  }

  /**
   * Finds the tree node holding the given user object, searching under the
   * given tag's node (or the root when the tag is null). Changelists are
   * matched among the root's direct children; {@link ChangeListChange}s use
   * the dedicated hashing strategy.
   */
  @Nullable
  public ChangesBrowserNode<?> findNodeInTree(@Nullable Object userObject, @Nullable Object tag) {
    if (userObject instanceof LocalChangeList) {
      return getRoot().iterateNodeChildren()
        .find(node -> userObject.equals(node.getUserObject()));
    }

    ChangesBrowserNode<?> fromNode = tag != null ? VcsTreeModelData.findTagNode(this, tag) : getRoot();
    if (fromNode == null) return null;

    if (userObject instanceof ChangeListChange) {
      return VcsTreeModelData.allUnder(fromNode).iterateNodes()
        .find(node -> ChangeListChange.HASHING_STRATEGY.equals(node.getUserObject(), userObject));
    }
    else {
      return VcsTreeModelData.allUnder(fromNode).iterateNodes()
        .find(node -> Objects.equals(node.getUserObject(), userObject));
    }
  }

  @Nullable
  public TreePath findNodePathInTree(@Nullable Object userObject) {
    return findNodePathInTree(userObject, null);
  }

  @Nullable
  public TreePath findNodePathInTree(@Nullable Object userObject, @Nullable Object tag) {
    DefaultMutableTreeNode node = findNodeInTree(userObject, tag);
    return node != null ? TreeUtil.getPathFromRoot(node) : null;
  }

  /**
   * Expands node only if its child count is small enough.
   * As expanding node with large child count is a slow operation (and result is not very useful).
   */
  public void expandSafe(@NotNull DefaultMutableTreeNode node) {
    if (node.getChildCount() <= 10000) {
      expandPath(TreeUtil.getPathFromRoot(node));
    }
  }
}
1,416 | /*
* Copyright 2022 The OSHI Project Contributors
* SPDX-License-Identifier: MIT
*/
package oshi.demo.jmx.demo;
import oshi.demo.jmx.CreateJmxOshiAgent;
import oshi.demo.jmx.api.JMXOshiAgent;
public class OshiJMXServer {

    /**
     * Entry point: creates a JMX OSHI agent bound to 127.0.0.1:8888 and
     * starts it.
     *
     * @param args unused
     * @throws Exception if the agent cannot be created or started
     */
    public static void main(String[] args) throws Exception {
        JMXOshiAgent agent = CreateJmxOshiAgent.createJmxOshiAgent(8888, "127.0.0.1");
        agent.startAgent();
    }
}
| oshi/oshi | oshi-demo/src/main/java/oshi/demo/jmx/demo/OshiJMXServer.java |
1,417 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.discovery;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService;
import org.elasticsearch.cluster.coordination.Coordinator;
import org.elasticsearch.cluster.coordination.FollowersChecker;
import org.elasticsearch.cluster.coordination.LeaderChecker;
import org.elasticsearch.cluster.coordination.MasterHistoryService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ChunkedToXContent;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.health.GetHealthAction;
import org.elasticsearch.health.HealthStatus;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.disruption.LongGCDisruption;
import org.elasticsearch.test.disruption.NetworkDisruption;
import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType;
import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions;
import org.elasticsearch.test.disruption.SingleNodeDisruption;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.json.JsonXContent;
import org.hamcrest.Matcher;
import org.junit.Before;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static java.util.Collections.singleton;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
/**
* Tests relating to the loss of the master, but which work with the default fault detection settings which are rather lenient and will
* not detect a master failure too quickly.
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false)
public class StableMasterDisruptionIT extends ESIntegTestCase {
    // Bootstraps the first started node as the initial master-eligible node.
    // NOTE(review): plain JUnit 4 requires @Before methods to be public; this
    // private method relies on the randomized runner used by ESIntegTestCase —
    // confirm before changing.
    @Before
    private void setBootstrapMasterNodeIndex() {
        internalCluster().setBootstrapMasterNodeIndex(0);
    }
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singletonList(MockTransportService.TestPlugin.class);
}
    /**
     * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488
     */
    public void testFailWithMinimumMasterNodesConfigured() throws Exception {
        List<String> nodes = internalCluster().startNodes(3);
        ensureStableCluster(3);

        // Figure out what is the elected master node
        final String masterNode = internalCluster().getMasterName();
        logger.info("---> legit elected master node={}", masterNode);

        // Pick a node that isn't the elected master.
        Set<String> nonMasters = new HashSet<>(nodes);
        nonMasters.remove(masterNode);
        final String unluckyNode = randomFrom(nonMasters.toArray(Strings.EMPTY_ARRAY));

        // Simulate a network issue between the unlucky node and elected master node in both directions.
        NetworkDisruption networkDisconnect = new NetworkDisruption(
            new NetworkDisruption.TwoPartitions(masterNode, unluckyNode),
            NetworkDisruption.DISCONNECT
        );
        setDisruptionScheme(networkDisconnect);
        networkDisconnect.startDisrupting();

        // Wait until elected master has removed that the unlucky node...
        ensureStableCluster(2, masterNode);

        // The unlucky node must report *no* master node, since it can't connect to master and in fact it should
        // continuously ping until network failures have been resolved. However
        // It may a take a bit before the node detects it has been cut off from the elected master
        ensureNoMaster(unluckyNode);
        // because it has had a master within the last 30s:
        assertGreenMasterStability(internalCluster().client(unluckyNode));

        networkDisconnect.stopDisrupting();

        // Wait until the master node sees all 3 nodes again.
        ensureStableCluster(3);

        // The elected master shouldn't have changed, since the unlucky node never could have elected itself as master
        assertThat(internalCluster().getMasterName(), equalTo(masterNode));
        assertGreenMasterStability(internalCluster().client());
    }
    // Asserts the master_is_stable health indicator eventually reports GREEN
    // with the standard "stable master" symptom.
    private void assertGreenMasterStability(Client client) throws Exception {
        assertMasterStability(client, HealthStatus.GREEN, containsString("The cluster has a stable master node"));
    }
    // Polls the health API until the master_is_stable indicator matches the
    // expected status and symptom; on failure the full health response is
    // included in the assertion message for debugging.
    private void assertMasterStability(Client client, HealthStatus expectedStatus, Matcher<String> expectedMatcher) throws Exception {
        assertBusy(() -> {
            GetHealthAction.Response healthResponse = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000))
                .get();
            String debugInformation = xContentToString(healthResponse);
            assertThat(debugInformation, healthResponse.findIndicator("master_is_stable").status(), equalTo(expectedStatus));
            assertThat(debugInformation, healthResponse.findIndicator("master_is_stable").symptom(), expectedMatcher);
        });
    }
    // Serializes a chunked XContent object to a JSON string (used to embed the
    // full health response in assertion messages).
    private String xContentToString(ChunkedToXContent xContent) throws IOException {
        XContentBuilder builder = JsonXContent.contentBuilder();
        xContent.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> {
            try {
                xcontent.toXContent(builder, ToXContent.EMPTY_PARAMS);
            } catch (IOException e) {
                // fail() aborts the test; log first so the cause is preserved.
                logger.error(e.getMessage(), e);
                fail(e.getMessage());
            }
        });
        return BytesReference.bytes(builder).utf8ToString();
    }
    // Waits until the given node's local cluster state reports no elected master.
    private void ensureNoMaster(String node) throws Exception {
        assertBusy(
            () -> assertNull(
                client(node).admin().cluster().state(new ClusterStateRequest().local(true)).get().getState().nodes().getMasterNode()
            )
        );
    }
    /**
     * Verify that nodes fault detection detects a disconnected node after master reelection
     */
    public void testFollowerCheckerDetectsDisconnectedNodeAfterMasterReelection() throws Exception {
        testFollowerCheckerAfterMasterReelection(NetworkDisruption.DISCONNECT, Settings.EMPTY);
        assertGreenMasterStability(internalCluster().client());
    }
    /**
     * Verify that nodes fault detection detects an unresponsive node after master reelection
     */
    public void testFollowerCheckerDetectsUnresponsiveNodeAfterMasterReelection() throws Exception {
        // Tightened leader/follower check timeouts so the unresponsive node is
        // detected quickly enough for the test.
        testFollowerCheckerAfterMasterReelection(
            NetworkDisruption.UNRESPONSIVE,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), "4")
                .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1)
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "10s")
                .build()
        );
        assertGreenMasterStability(internalCluster().client());
    }
private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType networkLinkDisruptionType, Settings settings)
throws Exception {
internalCluster().startNodes(4, settings);
ensureStableCluster(4);
logger.info("--> stopping current master");
internalCluster().stopCurrentMasterNode();
ensureStableCluster(3);
final String master = internalCluster().getMasterName();
final List<String> nonMasters = Arrays.stream(internalCluster().getNodeNames()).filter(n -> master.equals(n) == false).toList();
final String isolatedNode = randomFrom(nonMasters);
final String otherNode = nonMasters.get(nonMasters.get(0).equals(isolatedNode) ? 1 : 0);
logger.info("--> isolating [{}]", isolatedNode);
final NetworkDisruption networkDisruption = new NetworkDisruption(
new TwoPartitions(singleton(isolatedNode), Sets.newHashSet(master, otherNode)),
networkLinkDisruptionType
);
setDisruptionScheme(networkDisruption);
networkDisruption.startDisrupting();
logger.info("--> waiting for master to remove it");
ensureStableCluster(2, master);
ensureNoMaster(isolatedNode);
networkDisruption.stopDisrupting();
ensureStableCluster(3);
}
    /**
     * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are
     * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale
     * master.
     */
    public void testStaleMasterNotHijackingMajority() throws Exception {
        // LongGCDisruption relies on Thread.suspend/resume, which was removed in JDK 20.
        assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20);
        // Short leader-check and publish timeouts so the majority side abandons the frozen master quickly.
        final List<String> nodes = internalCluster().startNodes(
            3,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s")
                .build()
        );
        ensureStableCluster(3);
        // Save the current master node as old master node, because that node will get frozen
        final String oldMasterNode = internalCluster().getMasterName();
        // Simulating a painful gc by suspending all threads for a long time on the current elected master node.
        SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode);
        // Save the majority side
        final List<String> majoritySide = new ArrayList<>(nodes);
        majoritySide.remove(oldMasterNode);
        // Keeps track of the previous and current master when a master node transition took place on each node on the majority side:
        final Map<String, List<Tuple<String, String>>> masters = Collections.synchronizedMap(new HashMap<>());
        for (final String node : majoritySide) {
            masters.put(node, new ArrayList<>());
            internalCluster().getInstance(ClusterService.class, node).addListener(event -> {
                DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode();
                DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
                if (Objects.equals(previousMaster, currentMaster) == false) {
                    logger.info(
                        "--> node {} received new cluster state: {} \n and had previous cluster state: {}",
                        node,
                        event.state(),
                        event.previousState()
                    );
                    // Record node names (null when there was no master) so assertions below can match on them.
                    String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null;
                    String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null;
                    masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
                }
            });
        }
        // Trips once the frozen node itself observes a state without a master, i.e. it stepped down.
        final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1);
        internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> {
            if (event.state().nodes().getMasterNodeId() == null) {
                oldMasterNodeSteppedDown.countDown();
            }
        });
        internalCluster().setDisruptionScheme(masterNodeDisruption);
        logger.info("--> freezing node [{}]", oldMasterNode);
        masterNodeDisruption.startDisrupting();
        // Wait for majority side to elect a new master
        assertBusy(() -> {
            for (final Map.Entry<String, List<Tuple<String, String>>> entry : masters.entrySet()) {
                final List<Tuple<String, String>> transitions = entry.getValue();
                assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null));
            }
        });
        // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be queued and
        // once the old master node un-freezes it gets executed. The old master node will send this update + the cluster state where it is
        // flagged as master to the other nodes that follow the new master. These nodes should ignore this update.
        internalCluster().getInstance(ClusterService.class, oldMasterNode)
            .submitUnbatchedStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    return ClusterState.builder(currentState).build();
                }
                @Override
                public void onFailure(Exception e) {
                    logger.warn("failure [sneaky-update]", e);
                }
            });
        // Save the new elected master node
        final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0));
        logger.info("--> new detected master node [{}]", newMasterNode);
        // Stop disruption
        logger.info("--> unfreezing node [{}]", oldMasterNode);
        masterNodeDisruption.stopDisrupting();
        // NOTE(review): the await result is ignored, so a timeout here is tolerated; presumably
        // ensureStableCluster below would catch a node that truly failed to step down — TODO confirm.
        oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS);
        logger.info("--> [{}] stepped down as master", oldMasterNode);
        ensureStableCluster(3);
        // Both majority-side nodes must have recorded transitions, and none of them may ever have
        // followed the stale (frozen) master again.
        assertThat(masters.size(), equalTo(2));
        for (Map.Entry<String, List<Tuple<String, String>>> entry : masters.entrySet()) {
            String nodeName = entry.getKey();
            List<Tuple<String, String>> transitions = entry.getValue();
            assertTrue(
                "[" + nodeName + "] should not apply state from old master [" + oldMasterNode + "] but it did: " + transitions,
                transitions.stream().noneMatch(t -> oldMasterNode.equals(t.v2()))
            );
        }
        assertGreenMasterStability(internalCluster().client());
    }
    /**
     * This helper method creates a 3-node cluster where all nodes are master-eligible, and then simulates a long GC on the master node
     * twice (forcing another node to be elected master each time). It then asserts that the master stability health indicator status is
     * YELLOW, and that expectedMasterStabilitySymptomSubstring is contained in the symptom.
     * @param expectedMasterStabilitySymptomSubstring A string to expect in the master stability health indicator symptom
     * @throws Exception if the cluster fails to stabilize or an assertion fails
     */
    public void testRepeatedMasterChanges(String expectedMasterStabilitySymptomSubstring) throws Exception {
        // LongGCDisruption relies on Thread.suspend/resume, which was removed in JDK 20.
        assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20);
        // IDENTITY_CHANGES_THRESHOLD is set to 1 so that the repeated reelections below are enough
        // to trip the "master identity changed too often" diagnosis (hence the expected YELLOW).
        final List<String> nodes = internalCluster().startNodes(
            3,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s")
                .put(CoordinationDiagnosticsService.IDENTITY_CHANGES_THRESHOLD_SETTING.getKey(), 1)
                .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 100)
                .build()
        );
        ensureStableCluster(3);
        String firstMaster = internalCluster().getMasterName();
        // Force the master to change 2 times:
        for (int i = 0; i < 2; i++) {
            // Save the current master node as old master node, because that node will get frozen
            final String oldMasterNode = internalCluster().getMasterName();
            // Simulating a painful gc by suspending all threads for a long time on the current elected master node.
            SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode);
            // Save the majority side
            final List<String> majoritySide = new ArrayList<>(nodes);
            majoritySide.remove(oldMasterNode);
            // Keeps track of the previous and current master when a master node transition took place on each node on the majority side:
            final Map<String, List<Tuple<String, String>>> masters = Collections.synchronizedMap(new HashMap<>());
            for (final String node : majoritySide) {
                masters.put(node, new ArrayList<>());
                internalCluster().getInstance(ClusterService.class, node).addListener(event -> {
                    DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode();
                    DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
                    if (Objects.equals(previousMaster, currentMaster) == false) {
                        logger.info(
                            "--> node {} received new cluster state: {} \n and had previous cluster state: {}",
                            node,
                            event.state(),
                            event.previousState()
                        );
                        String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null;
                        String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null;
                        masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
                    }
                });
            }
            // Trips once the frozen node itself observes a state without a master, i.e. it stepped down.
            final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1);
            internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> {
                if (event.state().nodes().getMasterNodeId() == null) {
                    oldMasterNodeSteppedDown.countDown();
                }
            });
            // Remove the previous iteration's disruption scheme before installing a new one.
            internalCluster().clearDisruptionScheme();
            internalCluster().setDisruptionScheme(masterNodeDisruption);
            logger.info("--> freezing node [{}]", oldMasterNode);
            masterNodeDisruption.startDisrupting();
            // Wait for majority side to elect a new master
            assertBusy(() -> {
                for (final Map.Entry<String, List<Tuple<String, String>>> entry : masters.entrySet()) {
                    final List<Tuple<String, String>> transitions = entry.getValue();
                    assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null));
                }
            });
            // Save the new elected master node
            final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0));
            logger.info("--> new detected master node [{}]", newMasterNode);
            // Stop disruption
            logger.info("--> unfreezing node [{}]", oldMasterNode);
            masterNodeDisruption.stopDisrupting();
            // NOTE(review): the await result is ignored; a timed-out step-down is left for
            // ensureStableCluster below to surface — TODO confirm this is intentional.
            oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS);
            logger.info("--> [{}] stepped down as master", oldMasterNode);
            ensureStableCluster(3);
            assertThat(masters.size(), equalTo(2));
        }
        List<String> nodeNamesExceptFirstMaster = Arrays.stream(internalCluster().getNodeNames())
            .filter(name -> name.equals(firstMaster) == false)
            .toList();
        /*
         * It is possible that the first node that became master got re-elected repeatedly. And since it was in a simulated GC when the
         * other node(s) were master, it only saw itself as master. So we want to check with another node.
         */
        Client client = internalCluster().client(randomFrom(nodeNamesExceptFirstMaster));
        assertMasterStability(client, HealthStatus.YELLOW, containsString(expectedMasterStabilitySymptomSubstring));
    }
    /**
     * Verifies that repeated null-master transitions are still reported as GREEN when the single
     * master-eligible node itself never noticed a problem (see the block comment below).
     */
    public void testRepeatedNullMasterRecognizedAsGreenIfMasterDoesNotKnowItIsUnstable() throws Exception {
        // LongGCDisruption relies on Thread.suspend/resume, which was removed in JDK 20.
        assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20);
        /*
         * In this test we have a single master-eligible node. We pause it repeatedly (simulating a long GC pause for example) so that
         * other nodes decide it is no longer the master. However since there is no other master-eligible node, another node is never
         * elected master. And the master node never recognizes that it had a problem. So when we run the master stability check on one
         * of the data nodes, it will see that there is a problem (the master has gone null repeatedly), but when it checks with the
         * master, the master says everything is fine. So we expect a GREEN status.
         */
        final List<String> masterNodes = internalCluster().startMasterOnlyNodes(
            1,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s")
                .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1)
                .build()
        );
        // Keep in sync with NO_MASTER_TRANSITIONS_THRESHOLD_SETTING on the data nodes below.
        int nullTransitionsThreshold = 1;
        final List<String> dataNodes = internalCluster().startDataOnlyNodes(
            2,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s")
                .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), nullTransitionsThreshold)
                // Wide lookup timeframe so all null-master transitions in this test are counted together.
                .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(60, TimeUnit.SECONDS))
                .build()
        );
        ensureStableCluster(3);
        // Freeze the master one more time than the threshold, so the data nodes would flag a problem on their own.
        for (int i = 0; i < nullTransitionsThreshold + 1; i++) {
            final String masterNode = masterNodes.get(0);
            // Simulating a painful gc by suspending all threads for a long time on the current elected master node.
            SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), masterNode);
            // Trips once BOTH data nodes have observed a state without a master.
            final CountDownLatch dataNodeMasterSteppedDown = new CountDownLatch(2);
            internalCluster().getInstance(ClusterService.class, dataNodes.get(0)).addListener(event -> {
                if (event.state().nodes().getMasterNodeId() == null) {
                    dataNodeMasterSteppedDown.countDown();
                }
            });
            internalCluster().getInstance(ClusterService.class, dataNodes.get(1)).addListener(event -> {
                if (event.state().nodes().getMasterNodeId() == null) {
                    dataNodeMasterSteppedDown.countDown();
                }
            });
            // Remove the previous iteration's disruption scheme before installing a new one.
            internalCluster().clearDisruptionScheme();
            internalCluster().setDisruptionScheme(masterNodeDisruption);
            logger.info("--> freezing node [{}]", masterNode);
            masterNodeDisruption.startDisrupting();
            // NOTE(review): the await result is ignored — a timeout here is tolerated; TODO confirm.
            dataNodeMasterSteppedDown.await(30, TimeUnit.SECONDS);
            // Stop disruption
            logger.info("--> unfreezing node [{}]", masterNode);
            masterNodeDisruption.stopDisrupting();
            ensureStableCluster(3, TimeValue.timeValueSeconds(30), false, randomFrom(dataNodes));
        }
        // Despite the repeated null-master transitions seen by the data nodes, the master reports
        // no problem, so the overall indicator must be GREEN.
        assertGreenMasterStability(internalCluster().client(randomFrom(dataNodes)));
    }
    /**
     * Verifies that the master stability indicator goes RED when the only master-eligible node is
     * stopped and the data nodes can find no master-eligible nodes at all.
     */
    public void testNoMasterEligibleNodes() throws Exception {
        /*
         * In this test we have a single master-eligible node. We then stop the master. We set the master lookup threshold very low on the
         * data nodes, so when we run the master stability check on one of the data nodes, it will see that there has been no master
         * recently and there are no master eligible nodes, so it returns a RED status.
         */
        internalCluster().startMasterOnlyNodes(
            1,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s")
                .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1)
                .build()
        );
        final List<String> dataNodes = internalCluster().startDataOnlyNodes(
            2,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s")
                .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1)
                // Disable cached-time granularity so the 1s lookup timeframe below is measured precisely.
                .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO)
                .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS))
                .build()
        );
        ensureStableCluster(3);
        internalCluster().stopCurrentMasterNode();
        assertMasterStability(
            internalCluster().client(randomFrom(dataNodes)),
            HealthStatus.RED,
            containsString("No master eligible nodes found in the cluster")
        );
        // NOTE(review): the data nodes are stopped explicitly, presumably so teardown does not
        // wait on a masterless cluster — TODO confirm.
        for (String dataNode : dataNodes) {
            internalCluster().stopNode(dataNode);
        }
    }
    /**
     * Verifies that the master stability indicator goes RED when a data node believes it cannot
     * join the node that the PeerFinder reports as the elected master.
     */
    public void testCannotJoinLeader() throws Exception {
        /*
         * In this test we have a single master-eligible node. We create a cluster change event saying that the master went to null and
         * send it only to the master history on each data node. As a result, the PeerFinder still thinks it is the master. Since the
         * PeerFinder thinks there is a master but we have record of it being null in the history, the data node thinks that it has
         * problems joining the elected master and returns a RED status.
         */
        internalCluster().startMasterOnlyNodes(
            1,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s")
                .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1)
                .build()
        );
        final List<String> dataNodes = internalCluster().startDataOnlyNodes(
            2,
            Settings.builder()
                .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s")
                .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s")
                .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1)
                // Disable cached-time granularity so the 1s lookup timeframe below is measured precisely.
                .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), TimeValue.ZERO)
                .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(1, TimeUnit.SECONDS))
                .build()
        );
        ensureStableCluster(3);
        // Inject a synthetic "master went from 'test' to null" transition directly into each data
        // node's local master history, without the rest of the node (e.g. the PeerFinder) seeing it.
        Iterable<MasterHistoryService> masterHistoryServices = internalCluster().getDataNodeInstances(MasterHistoryService.class);
        for (MasterHistoryService masterHistoryService : masterHistoryServices) {
            ClusterState state = new ClusterState.Builder(new ClusterName(internalCluster().getClusterName())).nodes(
                new DiscoveryNodes.Builder().masterNodeId(null)
            ).build();
            ClusterState previousState = new ClusterState.Builder(new ClusterName(internalCluster().getClusterName())).nodes(
                new DiscoveryNodes.Builder().masterNodeId("test").add(DiscoveryNodeUtils.create("test", "test"))
            ).build();
            ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("test", state, previousState);
            masterHistoryService.getLocalMasterHistory().clusterChanged(clusterChangedEvent);
        }
        assertMasterStability(
            internalCluster().client(randomFrom(dataNodes)),
            HealthStatus.RED,
            containsString("has been elected master, but the node being queried")
        );
    }
}
| elastic/elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java |
1,418 | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jdbi.v3.moshi;
import java.io.IOException;
import java.lang.reflect.Type;
import com.squareup.moshi.JsonAdapter;
import org.jdbi.v3.core.config.ConfigRegistry;
import org.jdbi.v3.core.result.UnableToProduceResultException;
import org.jdbi.v3.json.JsonMapper;
/**
 * {@link JsonMapper} backed by Moshi: (de)serialization is delegated to the {@link JsonAdapter}
 * that the configured Moshi instance provides for the requested type.
 */
class MoshiJsonMapper implements JsonMapper {
    @Override
    public TypedJsonMapper forType(Type type, ConfigRegistry config) {
        final JsonAdapter<Object> jsonAdapter = config.get(MoshiConfig.class).getMoshi().adapter(type);
        return new TypedJsonMapper() {
            @Override
            public String toJson(Object value, ConfigRegistry config) {
                return jsonAdapter.toJson(value);
            }

            @Override
            public Object fromJson(String json, ConfigRegistry config) {
                try {
                    return jsonAdapter.fromJson(json);
                } catch (IOException e) {
                    // Moshi reports malformed JSON via IOException; surface it as a result-production failure.
                    throw new UnableToProduceResultException(e);
                }
            }
        };
    }
}
| jdbi/jdbi | moshi/src/main/java/org/jdbi/v3/moshi/MoshiJsonMapper.java |
1,419 | package com.googleresearch.bustle;
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;
import com.googleresearch.bustle.exception.SynthesisError;
import com.googleresearch.bustle.value.ConstantValue;
import com.googleresearch.bustle.value.InputValue;
import com.googleresearch.bustle.value.Value;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@RunWith(JUnit4.class)
public final class UtilsTest {

    @Test
    public void generatePartitionsReturnsExpected() throws Exception {
        // All orderings of three positive integers that sum to 5.
        assertThat(Utils.generatePartitions(5, 3))
            .containsExactly(
                Arrays.asList(1, 1, 3),
                Arrays.asList(1, 2, 2),
                Arrays.asList(1, 3, 1),
                Arrays.asList(2, 1, 2),
                Arrays.asList(2, 2, 1),
                Arrays.asList(3, 1, 1));
        // Degenerate cases: a single part, all ones, and unsatisfiable requests.
        assertThat(Utils.generatePartitions(5, 1)).containsExactly(Arrays.asList(5));
        assertThat(Utils.generatePartitions(5, 5)).containsExactly(Arrays.asList(1, 1, 1, 1, 1));
        assertThat(Utils.generatePartitions(5, 6)).isEmpty();
        assertThat(Utils.generatePartitions(0, 5)).isEmpty();
    }

    @Test
    public void generatePartitionsRaises() throws Exception {
        // Negative totals and non-positive part counts are rejected outright.
        assertThrows(SynthesisError.class, () -> Utils.generatePartitions(-1, 3));
        assertThrows(SynthesisError.class, () -> Utils.generatePartitions(5, 0));
    }

    @Test
    public void checkSubExpressionGetter() throws Exception {
        // Build the expression CONCATENATE(LEFT(in_1, 4), in_2) bottom-up.
        Operation leftOperation = Operation.lookupOperation("LEFT", 2);
        Operation concatOperation = Operation.lookupOperation("CONCATENATE", 2);
        InputValue firstInput = new InputValue(Arrays.asList("abcde", "fghij", "klmno"), "in_1");
        ConstantValue four = new ConstantValue(4, 3);
        Value leftValue = leftOperation.apply(Arrays.asList(firstInput, four));
        InputValue secondInput = new InputValue(Arrays.asList("_kermit", "_animal", "_beaker"), "in_2");
        Value concatValue = concatOperation.apply(Arrays.asList(leftValue, secondInput));

        // Every node of the expression tree, including the root itself, must be reported.
        Set<Value> subExpressions = Utils.getSubExpressions(concatValue);
        assertThat(subExpressions)
            .containsExactly(
                firstInput, // in_1
                four, // 4
                leftValue, // LEFT(in_1, 4)
                secondInput, // in_2
                concatValue // CONCATENATE(LEFT(in_1, 4), in_2)
            );
    }
}
| google-research/google-research | bustle/javatests/com/googleresearch/bustle/UtilsTest.java |
1,420 | package com.thealgorithms.ciphers;
import java.util.Objects;
/**
 * Columnar Transposition Cipher Encryption and Decryption.
 *
 * <p>The plaintext is written row by row into a table whose width equals the keyword length
 * (padding the final row with {@code ≈}). The columns are then sorted by the abecedarium index of
 * the corresponding keyword character, and the ciphertext is read off column by column.
 *
 * <p>Note: {@link #decrypter()} reconstructs the plaintext from the table retained by the most
 * recent {@code encrpyter} call; it does not invert an arbitrary ciphertext.
 *
 * @author <a href="https://github.com/freitzzz">freitzzz</a>
 */
public final class ColumnarTranspositionCipher {

    private ColumnarTranspositionCipher() {
    }

    // Keyword of the most recent encryption; its characters determine the column ordering.
    private static String keyword;
    // Table of the most recent encryption (header row of key indices + plaintext rows); read back by decrypter().
    private static Object[][] table;
    // Alphabet used to rank the keyword characters.
    private static String abecedarium;
    /** Default abecedarium, used by the 3-argument overload when the caller passes {@code null}. */
    public static final String ABECEDARIUM = "abcdefghijklmnopqrstuvwxyzABCDEFG"
        + "HIJKLMNOPQRSTUVWXYZ0123456789,.;:-@";
    /** Padding marker written into unused cells of the last table row. */
    private static final String ENCRYPTION_FIELD = "≈";
    private static final char ENCRYPTION_FIELD_CHAR = '≈';

    /**
     * Encrypts a certain String with the Columnar Transposition Cipher Rule
     *
     * @param word Word being encrypted
     * @param keyword String with keyword being used
     * @return a String with the word encrypted by the Columnar Transposition
     * Cipher Rule
     */
    public static String encrpyter(String word, String keyword) {
        ColumnarTranspositionCipher.keyword = keyword;
        abecedariumBuilder(500);
        table = tableBuilder(word);
        Object[][] sortedTable = sortTable(table);
        StringBuilder wordEncrypted = new StringBuilder();
        // Read the sorted table column by column, skipping the header row (row 0 holds key indices).
        // BUGFIX: the loop bound must be the column count (sortedTable[0].length) — the previous
        // bound sortedTable[i].length read past the row array when there were more columns than
        // rows, matching the (correct) 3-argument overload below.
        for (int i = 0; i < sortedTable[0].length; i++) {
            for (int j = 1; j < sortedTable.length; j++) {
                wordEncrypted.append(sortedTable[j][i]);
            }
        }
        return wordEncrypted.toString();
    }

    /**
     * Encrypts a certain String with the Columnar Transposition Cipher Rule
     *
     * @param word Word being encrypted
     * @param keyword String with keyword being used
     * @param abecedarium String with the abecedarium being used. null for
     * default one
     * @return a String with the word encrypted by the Columnar Transposition
     * Cipher Rule
     */
    public static String encrpyter(String word, String keyword, String abecedarium) {
        ColumnarTranspositionCipher.keyword = keyword;
        // NOTE: keyword characters must occur in the chosen abecedarium; otherwise findElements()
        // stores null indices and sorting fails — the caller is responsible for a compatible pair.
        ColumnarTranspositionCipher.abecedarium = Objects.requireNonNullElse(abecedarium, ABECEDARIUM);
        table = tableBuilder(word);
        Object[][] sortedTable = sortTable(table);
        StringBuilder wordEncrypted = new StringBuilder();
        for (int i = 0; i < sortedTable[0].length; i++) {
            for (int j = 1; j < sortedTable.length; j++) {
                wordEncrypted.append(sortedTable[j][i]);
            }
        }
        return wordEncrypted.toString();
    }

    /**
     * Decrypts a certain encrypted String with the Columnar Transposition
     * Cipher Rule
     *
     * @return a String decrypted with the word encrypted by the Columnar
     * Transposition Cipher Rule
     */
    public static String decrypter() {
        StringBuilder wordDecrypted = new StringBuilder();
        // Read the retained (unsorted) table row by row, skipping the header row of key indices.
        for (int i = 1; i < table.length; i++) {
            for (Object item : table[i]) {
                wordDecrypted.append(item);
            }
        }
        // Strip the padding markers added for the final, possibly partial, row.
        return wordDecrypted.toString().replaceAll(ENCRYPTION_FIELD, "");
    }

    /**
     * Builds a table with the word to be encrypted in rows by the Columnar
     * Transposition Cipher Rule
     *
     * @return An Object[][] with the word to be encrypted filled in rows and
     * columns
     */
    private static Object[][] tableBuilder(String word) {
        Object[][] table = new Object[numberOfRows(word) + 1][keyword.length()];
        char[] wordInChards = word.toCharArray();
        // Fills in the header row with the abecedarium indices of the keyword characters.
        table[0] = findElements();
        int charElement = 0;
        for (int i = 1; i < table.length; i++) {
            for (int j = 0; j < table[i].length; j++) {
                if (charElement < wordInChards.length) {
                    table[i][j] = wordInChards[charElement];
                    charElement++;
                } else {
                    // Pad unused trailing cells so every row has the full keyword width.
                    table[i][j] = ENCRYPTION_FIELD_CHAR;
                }
            }
        }
        return table;
    }

    /**
     * Determines the number of rows the table should have regarding the
     * Columnar Transposition Cipher Rule
     *
     * @return an int with the number of rows that the table should have in
     * order to respect the Columnar Transposition Cipher Rule.
     */
    private static int numberOfRows(String word) {
        // BUGFIX: the previous condition compared an expression to itself and was always false,
        // so words whose length is not a multiple of the keyword length lost their tail characters.
        if (word.length() % keyword.length() != 0) {
            return (word.length() / keyword.length()) + 1;
        }
        return word.length() / keyword.length();
    }

    /**
     * @return charValues the abecedarium index of each keyword character (null when absent)
     */
    private static Object[] findElements() {
        Object[] charValues = new Object[keyword.length()];
        for (int i = 0; i < charValues.length; i++) {
            int charValueIndex = abecedarium.indexOf(keyword.charAt(i));
            charValues[i] = charValueIndex > -1 ? charValueIndex : null;
        }
        return charValues;
    }

    /**
     * Sorts the table columns in ascending order of their header value (the abecedarium index of
     * the corresponding keyword character). The input table is left untouched.
     *
     * @return tableSorted a copy of the table with its columns reordered
     */
    private static Object[][] sortTable(Object[][] table) {
        Object[][] tableSorted = new Object[table.length][table[0].length];
        for (int i = 0; i < tableSorted.length; i++) {
            System.arraycopy(table[i], 0, tableSorted[i], 0, tableSorted[i].length);
        }
        for (int i = 0; i < tableSorted[0].length; i++) {
            for (int j = i + 1; j < tableSorted[0].length; j++) {
                // BUGFIX: both operands must come from the working copy; comparing against the
                // unsorted input table produced a wrong order once columns had been swapped.
                if ((int) tableSorted[0][i] > (int) tableSorted[0][j]) {
                    Object[] column = getColumn(tableSorted, tableSorted.length, i);
                    switchColumns(tableSorted, j, i, column);
                }
            }
        }
        return tableSorted;
    }

    /**
     * @return columnArray the contents of the given column, top to bottom
     */
    private static Object[] getColumn(Object[][] table, int rows, int column) {
        Object[] columnArray = new Object[rows];
        for (int i = 0; i < rows; i++) {
            columnArray[i] = table[i][column];
        }
        return columnArray;
    }

    /**
     * Swaps two columns in place: the second column receives the first column's current cells,
     * and the first column receives {@code columnToSwitch} (a snapshot of the second column).
     */
    private static void switchColumns(Object[][] table, int firstColumnIndex, int secondColumnIndex, Object[] columnToSwitch) {
        for (int i = 0; i < table.length; i++) {
            table[i][secondColumnIndex] = table[i][firstColumnIndex];
            table[i][firstColumnIndex] = columnToSwitch[i];
        }
    }

    /**
     * Creates an abecedarium from the first {@code value} characters of the ASCII/Unicode table.
     *
     * @param value Number of characters being used based on the ASCII Table
     */
    private static void abecedariumBuilder(int value) {
        StringBuilder t = new StringBuilder();
        for (int i = 0; i < value; i++) {
            t.append((char) i);
        }
        abecedarium = t.toString();
    }

    /** Prints the retained encryption table for demonstration purposes. */
    private static void showTable() {
        for (Object[] table1 : table) {
            for (Object item : table1) {
                System.out.print(item + " ");
            }
            System.out.println();
        }
    }

    /** Demonstrates a full encrypt/decrypt round trip. */
    public static void main(String[] args) {
        String keywordForExample = "asd215";
        String wordBeingEncrypted = "This is a test of the Columnar Transposition Cipher";
        System.out.println("### Example of Columnar Transposition Cipher ###\n");
        System.out.println("Word being encryped ->>> " + wordBeingEncrypted);
        System.out.println("Word encrypted ->>> " + ColumnarTranspositionCipher.encrpyter(wordBeingEncrypted, keywordForExample));
        System.out.println("Word decryped ->>> " + ColumnarTranspositionCipher.decrypter());
        System.out.println("\n### Encrypted Table ###");
        showTable();
    }
}
| TheAlgorithms/Java | src/main/java/com/thealgorithms/ciphers/ColumnarTranspositionCipher.java |
1,421 | /*
* Copyright 2002-2023 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.http.converter.json;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.springframework.http.MediaType;
import org.springframework.lang.Nullable;
/**
* Implementation of {@link org.springframework.http.converter.HttpMessageConverter} that can read and
* write JSON using <a href="https://github.com/FasterXML/jackson">Jackson 2.x's</a> {@link ObjectMapper}.
*
* <p>This converter can be used to bind to typed beans, or untyped {@code HashMap} instances.
*
* <p>By default, this converter supports {@code application/json} and {@code application/*+json}
* with {@code UTF-8} character set. This can be overridden by setting the
* {@link #setSupportedMediaTypes supportedMediaTypes} property.
*
* <p>The default constructor uses the default configuration provided by {@link Jackson2ObjectMapperBuilder}.
*
* @author Arjen Poutsma
* @author Keith Donald
* @author Rossen Stoyanchev
* @author Juergen Hoeller
* @author Sebastien Deleuze
* @since 3.1.2
*/
public class MappingJackson2HttpMessageConverter extends AbstractJackson2HttpMessageConverter {

	private static final List<MediaType> problemDetailMediaTypes =
			Collections.singletonList(MediaType.APPLICATION_PROBLEM_JSON);

	@Nullable
	private String jsonPrefix;


	/**
	 * Default constructor: delegates to the {@link ObjectMapper}-based constructor with the
	 * default mapper produced by {@link Jackson2ObjectMapperBuilder#json()}.
	 */
	public MappingJackson2HttpMessageConverter() {
		this(Jackson2ObjectMapperBuilder.json().build());
	}

	/**
	 * Constructor accepting a pre-configured {@link ObjectMapper}; consider building one with
	 * {@link Jackson2ObjectMapperBuilder} for sensible defaults.
	 * <p>Registers support for {@code application/json} and {@code application/*+json}.
	 * @see Jackson2ObjectMapperBuilder#json()
	 */
	public MappingJackson2HttpMessageConverter(ObjectMapper objectMapper) {
		super(objectMapper, MediaType.APPLICATION_JSON, new MediaType("application", "*+json"));
	}


	/**
	 * Set a custom prefix for this converter's JSON output; by default no prefix is written.
	 * @see #setPrefixJson
	 */
	public void setJsonPrefix(String jsonPrefix) {
		this.jsonPrefix = jsonPrefix;
	}

	/**
	 * Toggle prefixing the JSON output with {@code ")]}', "} (default {@code false}).
	 * <p>The prefix helps to defend against JSON Hijacking: it makes the payload syntactically
	 * invalid as a script, so it cannot be executed if loaded via a script tag. Clients must
	 * strip it before parsing the response as JSON.
	 * @see #setJsonPrefix
	 */
	public void setPrefixJson(boolean prefixJson) {
		if (prefixJson) {
			this.jsonPrefix = ")]}', ";
		}
		else {
			this.jsonPrefix = null;
		}
	}

	@Override
	protected List<MediaType> getMediaTypesForProblemDetail() {
		return problemDetailMediaTypes;
	}

	@Override
	protected void writePrefix(JsonGenerator generator, Object object) throws IOException {
		String prefix = this.jsonPrefix;
		if (prefix != null) {
			generator.writeRaw(prefix);
		}
	}

}
| spring-projects/spring-framework | spring-web/src/main/java/org/springframework/http/converter/json/MappingJackson2HttpMessageConverter.java |
1,422 | /*
* Copyright 2002-2023 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.core.codec;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;
import reactor.core.publisher.Flux;
import org.springframework.core.ResolvableType;
import org.springframework.core.io.buffer.DataBuffer;
import org.springframework.core.io.buffer.DataBufferFactory;
import org.springframework.core.io.buffer.DataBufferUtils;
import org.springframework.core.io.buffer.DefaultDataBufferFactory;
import org.springframework.util.MimeType;
/**
 * Benchmarks for {@link StringDecoder}.
 *
 * <p>NOTE(review): the previous Javadoc referenced {@code DataBufferUtils}, but
 * this class exercises {@code StringDecoder#decode} on SSE-style line input.
 *
 * @author Rossen Stoyanchev
 */
@BenchmarkMode(Mode.Throughput)
public class StringDecoderBenchmark {

	/** Decode the prepared chunks into lines and fully drain the resulting Flux. */
	@Benchmark
	public void parseSseLines(SseLinesState state, Blackhole blackhole) {
		blackhole.consume(state.parseLines().blockLast());
	}

	/** Benchmark state: pre-built data buffer chunks containing SSE-like events. */
	@State(Scope.Benchmark)
	@SuppressWarnings({"NotNullFieldNotInitialized", "ConstantConditions"})
	public static class SseLinesState {

		private static final Charset CHARSET = StandardCharsets.UTF_8;

		private static final ResolvableType ELEMENT_TYPE = ResolvableType.forClass(String.class);

		// Total payload size in bytes, across all generated events.
		@Param("10240")
		int totalSize;

		// Approximate size in bytes of each DataBuffer chunk fed to the decoder.
		@Param("2000")
		int chunkSize;

		List<DataBuffer> chunks;

		// Split on CRLF or LF; the boolean flag is presumably "stripDelimiter" —
		// confirm against the StringDecoder.textPlainOnly signature.
		StringDecoder decoder = StringDecoder.textPlainOnly(Arrays.asList("\r\n", "\n"), false);

		MimeType mimeType = new MimeType("text", "plain", CHARSET);

		@Setup(Level.Trial)
		public void setup() {
			// NOTE(review): "$1" looks like a mangled "%1$s" format specifier; as
			// written, String.format performs no substitution on this template —
			// verify against the original source.
			String eventTemplate = """
					id:$1
					event:some-event
					:some-comment-$1-aa
					:some-comment-$1-bb
					data:abcdefg-$1-hijklmnop-$1-qrstuvw-$1-xyz-$1
					""";
			int eventLength = String.format(eventTemplate, String.format("%05d", 1)).length();
			int eventCount = this.totalSize / eventLength;
			DataBufferFactory bufferFactory = new DefaultDataBufferFactory();
			// Render eventCount events, group them so each chunk is ~chunkSize bytes
			// (at least one event per chunk), then copy each chunk into a DataBuffer.
			this.chunks = Flux.range(1, eventCount)
					.map(index -> String.format(eventTemplate, String.format("%05d", index)))
					.buffer(this.chunkSize > eventLength ? this.chunkSize / eventLength : 1)
					.map(strings -> String.join("", strings))
					.map(chunk -> {
						byte[] bytes = chunk.getBytes(CHARSET);
						DataBuffer buffer = bufferFactory.allocateBuffer(bytes.length);
						buffer.write(bytes);
						return buffer;
					})
					.collectList()
					.block();
		}

		public Flux<String> parseLines() {
			// Retain each buffer so repeated benchmark iterations can replay the
			// same chunks without them being released by the decoder.
			Flux<DataBuffer> input = Flux.fromIterable(this.chunks).doOnNext(DataBufferUtils::retain);
			return this.decoder.decode(input, ELEMENT_TYPE, this.mimeType, Collections.emptyMap());
		}
	}
}
| spring-projects/spring-framework | spring-core/src/jmh/java/org/springframework/core/codec/StringDecoderBenchmark.java |
1,423 | 404: Not Found | ActorExpose/android | cSploit/src/main/java/org/csploit/android/plugins/mitm/hijacker/Hijacker.java |
1,424 | package emu.grasscutter;
public final class DebugConstants {

	// Verbose-logging toggles for server debugging; all disabled by default.
	// These are deliberately mutable so they can be flipped at runtime.
	public static boolean LOG_ABILITIES = false;
	public static boolean LOG_LUA_SCRIPTS = false;
	public static boolean LOG_QUEST_START = false;

	// Toggles that log when a requested ability/script/handler is not implemented.
	public static boolean LOG_MISSING_ABILITIES = false;
	public static boolean LOG_MISSING_LUA_SCRIPTS = false;
	public static boolean LOG_MISSING_ABILITY_HANDLERS = false;

	/**
	 * WARNING: THIS IS A DANGEROUS SETTING. DO NOT ENABLE UNLESS YOU KNOW WHAT YOU ARE DOING. This
	 * allows the *client* to send *ANY* token and UID pair to the server. The server will then accept
	 * the token and UID pair as valid, and set the account's token to the client specified one. This
	 * can allow for IMPERSONATION and HIJACKING of accounts/servers.
	 */
	public static final boolean ACCEPT_CLIENT_TOKEN = false;

	private DebugConstants() {
		// Prevent instantiation.
	}
}
| Grasscutters/Grasscutter | src/main/java/emu/grasscutter/DebugConstants.java |
1,425 | /*
* Copyright 2002-2023 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.web.servlet.view.json;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import com.fasterxml.jackson.annotation.JsonView;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import org.springframework.http.converter.json.Jackson2ObjectMapperBuilder;
import org.springframework.lang.Nullable;
import org.springframework.util.CollectionUtils;
import org.springframework.validation.BindingResult;
import org.springframework.web.servlet.View;
/**
 * Spring MVC {@link View} that renders JSON content by serializing the model for the current request
 * using <a href="https://github.com/FasterXML/jackson">Jackson 2's</a> {@link ObjectMapper}.
 *
 * <p>By default, the entire contents of the model map (with the exception of framework-specific classes)
 * will be encoded as JSON. If the model contains only one key, you can have it extracted encoded as JSON
 * alone via {@link #setExtractValueFromSingleKeyModel}.
 *
 * <p>The default constructor uses the default configuration provided by {@link Jackson2ObjectMapperBuilder}.
 *
 * @author Jeremy Grelle
 * @author Arjen Poutsma
 * @author Rossen Stoyanchev
 * @author Juergen Hoeller
 * @author Sebastien Deleuze
 * @since 3.1.2
 */
public class MappingJackson2JsonView extends AbstractJackson2View {

	/**
	 * Default content type: "application/json".
	 * Overridable through {@link #setContentType}.
	 */
	public static final String DEFAULT_CONTENT_TYPE = "application/json";

	@Nullable
	private String jsonPrefix;

	@Nullable
	private Set<String> modelKeys;

	private boolean extractValueFromSingleKeyModel = false;

	/**
	 * Construct a new {@code MappingJackson2JsonView} using default configuration
	 * provided by {@link Jackson2ObjectMapperBuilder} and setting the content type
	 * to {@code application/json}.
	 */
	public MappingJackson2JsonView() {
		super(Jackson2ObjectMapperBuilder.json().build(), DEFAULT_CONTENT_TYPE);
	}

	/**
	 * Construct a new {@code MappingJackson2JsonView} using the provided
	 * {@link ObjectMapper} and setting the content type to {@code application/json}.
	 * @since 4.2.1
	 */
	public MappingJackson2JsonView(ObjectMapper objectMapper) {
		super(objectMapper, DEFAULT_CONTENT_TYPE);
	}

	/**
	 * Specify a custom prefix to use for this view's JSON output.
	 * Default is none.
	 * @see #setPrefixJson
	 */
	public void setJsonPrefix(String jsonPrefix) {
		this.jsonPrefix = jsonPrefix;
	}

	/**
	 * Indicates whether the JSON output by this view should be prefixed with <code>")]}', "</code>.
	 * Default is {@code false}.
	 * <p>Prefixing the JSON string in this manner is used to help prevent JSON Hijacking.
	 * The prefix renders the string syntactically invalid as a script so that it cannot be hijacked.
	 * This prefix should be stripped before parsing the string as JSON.
	 * @see #setJsonPrefix
	 */
	public void setPrefixJson(boolean prefixJson) {
		this.jsonPrefix = (prefixJson ? ")]}', " : null);
	}

	@Override
	public void setModelKey(String modelKey) {
		this.modelKeys = Collections.singleton(modelKey);
	}

	/**
	 * Set the attributes in the model that should be rendered by this view.
	 * When set, all other model attributes will be ignored.
	 */
	public void setModelKeys(@Nullable Set<String> modelKeys) {
		this.modelKeys = modelKeys;
	}

	/**
	 * Return the attributes in the model that should be rendered by this view.
	 */
	@Nullable
	public final Set<String> getModelKeys() {
		return this.modelKeys;
	}

	/**
	 * Set whether to serialize models containing a single attribute as a map or
	 * whether to extract the single value from the model and serialize it directly.
	 * <p>The effect of setting this flag is similar to using
	 * {@code MappingJackson2HttpMessageConverter} with an {@code @ResponseBody}
	 * request-handling method.
	 * <p>Default is {@code false}.
	 */
	public void setExtractValueFromSingleKeyModel(boolean extractValueFromSingleKeyModel) {
		this.extractValueFromSingleKeyModel = extractValueFromSingleKeyModel;
	}

	/**
	 * Filter out undesired attributes from the given model.
	 * The return value can be either another {@link Map} or a single value object.
	 * <p>The default implementation removes {@link BindingResult} instances and entries
	 * not included in the {@link #setModelKeys modelKeys} property.
	 * @param model the model, as passed on to {@link #renderMergedOutputModel}
	 * @return the value to be rendered
	 */
	@Override
	protected Object filterModel(Map<String, Object> model) {
		Map<String, Object> result = CollectionUtils.newHashMap(model.size());
		// When no explicit model keys are configured, render every attribute.
		Set<String> modelKeys = (!CollectionUtils.isEmpty(this.modelKeys) ? this.modelKeys : model.keySet());
		// Note: the map keys are model attribute names, not classes — renamed the
		// lambda parameter (previously "clazz") accordingly. The JsonView/FilterProvider
		// class names are reserved hint keys consumed by AbstractJackson2View.
		model.forEach((attributeName, value) -> {
			if (!(value instanceof BindingResult) && modelKeys.contains(attributeName) &&
					!attributeName.equals(JsonView.class.getName()) &&
					!attributeName.equals(FilterProvider.class.getName())) {
				result.put(attributeName, value);
			}
		});
		return (this.extractValueFromSingleKeyModel && result.size() == 1 ? result.values().iterator().next() : result);
	}

	@Override
	protected void writePrefix(JsonGenerator generator, Object object) throws IOException {
		// Emit the anti-hijacking prefix (if configured) unescaped, ahead of the payload.
		if (this.jsonPrefix != null) {
			generator.writeRaw(this.jsonPrefix);
		}
	}
}
| spring-projects/spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/view/json/MappingJackson2JsonView.java |
1,426 | /*
* Copyright 2002-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.http.converter.json;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.lang.reflect.Type;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import org.springframework.core.GenericTypeResolver;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpInputMessage;
import org.springframework.http.HttpOutputMessage;
import org.springframework.http.MediaType;
import org.springframework.http.converter.AbstractGenericHttpMessageConverter;
import org.springframework.http.converter.HttpMessageNotReadableException;
import org.springframework.http.converter.HttpMessageNotWritableException;
import org.springframework.lang.Nullable;
/**
 * Common base class for plain JSON converters, e.g. Gson and JSON-B.
 *
 * <p>Note that the Jackson converters have a dedicated class hierarchy
 * due to their multi-format support.
 *
 * @author Juergen Hoeller
 * @since 5.0
 * @see GsonHttpMessageConverter
 * @see JsonbHttpMessageConverter
 * @see #readInternal(Type, Reader)
 * @see #writeInternal(Object, Type, Writer)
 */
public abstract class AbstractJsonHttpMessageConverter extends AbstractGenericHttpMessageConverter<Object> {

	/**
	 * The default charset used by the converter.
	 */
	public static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;

	// Optional anti-hijacking prefix written before the JSON payload; null = none.
	@Nullable
	private String jsonPrefix;

	public AbstractJsonHttpMessageConverter() {
		// Supports application/json plus any "+json" structured-syntax suffix type.
		super(MediaType.APPLICATION_JSON, new MediaType("application", "*+json"));
		setDefaultCharset(DEFAULT_CHARSET);
	}

	/**
	 * Specify a custom prefix to use for JSON output. Default is none.
	 * @see #setPrefixJson
	 */
	public void setJsonPrefix(String jsonPrefix) {
		this.jsonPrefix = jsonPrefix;
	}

	/**
	 * Indicate whether the JSON output by this view should be prefixed with ")]}', ".
	 * Default is {@code false}.
	 * <p>Prefixing the JSON string in this manner is used to help prevent JSON
	 * Hijacking. The prefix renders the string syntactically invalid as a script
	 * so that it cannot be hijacked.
	 * This prefix should be stripped before parsing the string as JSON.
	 * @see #setJsonPrefix
	 */
	public void setPrefixJson(boolean prefixJson) {
		this.jsonPrefix = (prefixJson ? ")]}', " : null);
	}

	@Override
	public final Object read(Type type, @Nullable Class<?> contextClass, HttpInputMessage inputMessage)
			throws IOException, HttpMessageNotReadableException {

		// Resolve generic type variables against the context class before reading.
		return readResolved(GenericTypeResolver.resolveType(type, contextClass), inputMessage);
	}

	@Override
	protected final Object readInternal(Class<?> clazz, HttpInputMessage inputMessage)
			throws IOException, HttpMessageNotReadableException {

		return readResolved(clazz, inputMessage);
	}

	// Shared read path: wraps any parse failure in HttpMessageNotReadableException
	// so callers get a consistent exception type regardless of the JSON library.
	private Object readResolved(Type resolvedType, HttpInputMessage inputMessage)
			throws IOException, HttpMessageNotReadableException {

		Reader reader = getReader(inputMessage);
		try {
			return readInternal(resolvedType, reader);
		}
		catch (Exception ex) {
			throw new HttpMessageNotReadableException("Could not read JSON: " + ex.getMessage(), ex, inputMessage);
		}
	}

	@Override
	protected final void writeInternal(Object object, @Nullable Type type, HttpOutputMessage outputMessage)
			throws IOException, HttpMessageNotWritableException {

		Writer writer = getWriter(outputMessage);
		// The prefix is written outside the try block: a failure here is an I/O
		// problem, not a serialization problem.
		if (this.jsonPrefix != null) {
			writer.append(this.jsonPrefix);
		}
		try {
			writeInternal(object, type, writer);
		}
		catch (Exception ex) {
			throw new HttpMessageNotWritableException("Could not write JSON: " + ex.getMessage(), ex);
		}
		writer.flush();
	}

	/**
	 * Template method that reads the JSON-bound object from the given {@link Reader}.
	 * @param resolvedType the resolved generic type
	 * @param reader the {@code} Reader to use
	 * @return the JSON-bound object
	 * @throws Exception in case of read/parse failures
	 */
	protected abstract Object readInternal(Type resolvedType, Reader reader) throws Exception;

	/**
	 * Template method that writes the JSON-bound object to the given {@link Writer}.
	 * @param object the object to write to the output message
	 * @param type the type of object to write (may be {@code null})
	 * @param writer the {@code} Writer to use
	 * @throws Exception in case of write failures
	 */
	protected abstract void writeInternal(Object object, @Nullable Type type, Writer writer) throws Exception;

	// Charset resolution order for both directions: Content-Type charset, else UTF-8.
	private static Reader getReader(HttpInputMessage inputMessage) throws IOException {
		return new InputStreamReader(inputMessage.getBody(), getCharset(inputMessage.getHeaders()));
	}

	private static Writer getWriter(HttpOutputMessage outputMessage) throws IOException {
		return new OutputStreamWriter(outputMessage.getBody(), getCharset(outputMessage.getHeaders()));
	}

	private static Charset getCharset(HttpHeaders headers) {
		Charset charset = (headers.getContentType() != null ? headers.getContentType().getCharset() : null);
		return (charset != null ? charset : DEFAULT_CHARSET);
	}
}
| spring-projects/spring-framework | spring-web/src/main/java/org/springframework/http/converter/json/AbstractJsonHttpMessageConverter.java |
1,427 | package jadx.gui.ui.codearea;
import java.awt.Color;
import java.awt.Font;
import java.awt.event.ActionEvent;
import java.awt.event.KeyEvent;
import java.awt.event.MouseEvent;
import java.beans.PropertyChangeListener;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.swing.AbstractAction;
import javax.swing.Icon;
import javax.swing.JCheckBoxMenuItem;
import javax.swing.KeyStroke;
import javax.swing.text.BadLocationException;
import javax.swing.text.EditorKit;
import javax.swing.text.JTextComponent;
import org.fife.ui.rsyntaxtextarea.FoldingAwareIconRowHeader;
import org.fife.ui.rsyntaxtextarea.RSyntaxTextArea;
import org.fife.ui.rsyntaxtextarea.RSyntaxTextAreaEditorKit;
import org.fife.ui.rsyntaxtextarea.RSyntaxTextAreaUI;
import org.fife.ui.rsyntaxtextarea.RSyntaxUtilities;
import org.fife.ui.rsyntaxtextarea.Style;
import org.fife.ui.rsyntaxtextarea.SyntaxConstants;
import org.fife.ui.rsyntaxtextarea.SyntaxScheme;
import org.fife.ui.rsyntaxtextarea.Theme;
import org.fife.ui.rtextarea.Gutter;
import org.fife.ui.rtextarea.GutterIconInfo;
import org.fife.ui.rtextarea.IconRowHeader;
import org.fife.ui.rtextarea.RTextArea;
import org.fife.ui.rtextarea.RTextAreaUI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import jadx.api.ICodeInfo;
import jadx.gui.device.debugger.BreakpointManager;
import jadx.gui.device.debugger.DbgUtils;
import jadx.gui.settings.JadxSettings;
import jadx.gui.treemodel.JClass;
import jadx.gui.treemodel.JNode;
import jadx.gui.treemodel.TextNode;
import jadx.gui.ui.panel.ContentPanel;
import jadx.gui.utils.NLS;
import jadx.gui.utils.UiUtils;
/**
 * Code area showing the smali representation of a class, with two display modes:
 * a plain smali printer ({@link NormalModel}) and a bytecode-oriented printer
 * ({@link DebugModel}) that integrates with the debugger (breakpoint gutter
 * icons, line highlight for the current execution position).
 */
public final class SmaliArea extends AbstractCodeArea {
	private static final Logger LOG = LoggerFactory.getLogger(SmaliArea.class);
	private static final long serialVersionUID = 1334485631870306494L;

	private static final Icon ICON_BREAKPOINT = UiUtils.openSvgIcon("debugger/db_set_breakpoint");
	private static final Icon ICON_BREAKPOINT_DISABLED = UiUtils.openSvgIcon("debugger/db_disabled_breakpoint");
	private static final Color BREAKPOINT_LINE_COLOR = Color.decode("#ad103c");
	private static final Color DEBUG_LINE_COLOR = Color.decode("#9c1138");

	// Lightweight node returned by getNode(); the real class node stays in 'node'.
	private final JNode textNode;
	// Popup check-box toggling between the two smali printers (persisted setting).
	private final JCheckBoxMenuItem cbUseSmaliV2;
	// Printer version the current text was produced with; used to detect stale text.
	private boolean curVersion = false;
	private SmaliModel model;

	SmaliArea(ContentPanel contentPanel, JClass node) {
		super(contentPanel, node);
		this.textNode = new TextNode(node.getName());
		cbUseSmaliV2 = new JCheckBoxMenuItem(NLS.str("popup.bytecode_col"),
				shouldUseSmaliPrinterV2());
		cbUseSmaliV2.setAction(new AbstractAction(NLS.str("popup.bytecode_col")) {
			private static final long serialVersionUID = -1111111202103170737L;

			@Override
			public void actionPerformed(ActionEvent e) {
				// Flip the persisted setting, then refresh every open smali tab so
				// they all switch printers together.
				JadxSettings settings = getContentPanel().getTabbedPane().getMainWindow().getSettings();
				settings.setSmaliAreaShowBytecode(!settings.getSmaliAreaShowBytecode());
				contentPanel.getTabbedPane().getTabs().forEach(v -> {
					if (v instanceof ClassCodeContentPanel) {
						switchModel();
						((ClassCodeContentPanel) v).getSmaliCodeArea().refresh();
					}
				});
				settings.sync();
			}
		});
		getPopupMenu().add(cbUseSmaliV2);
		switchModel();
	}

	@Override
	public void load() {
		// Reload if nothing is loaded yet or if the printer version changed since
		// the text was produced.
		if (getText().isEmpty() || curVersion != shouldUseSmaliPrinterV2()) {
			curVersion = shouldUseSmaliPrinterV2();
			model.load();
			setCaretPosition(0);
			setLoaded();
		}
	}

	@Override
	public ICodeInfo getCodeInfo() {
		// Smali view has no code metadata (navigation/usage info).
		return ICodeInfo.EMPTY;
	}

	@Override
	public void refresh() {
		load();
	}

	@Override
	public JNode getNode() {
		// this area contains only smali without other node attributes
		return textNode;
	}

	public JClass getJClass() {
		return ((JClass) node);
	}

	// Dispose the current model (if any) and install the one matching the setting.
	private void switchModel() {
		if (model != null) {
			model.unload();
		}
		model = shouldUseSmaliPrinterV2() ? new DebugModel() : new NormalModel();
	}

	// Called by the debugger to highlight the currently executing position;
	// forces the bytecode (v2) printer.
	public void scrollToDebugPos(int pos) {
		getContentPanel().getTabbedPane().getMainWindow()
				.getSettings().setSmaliAreaShowBytecode(true); // don't sync when it's set programmatically.
		cbUseSmaliV2.setState(shouldUseSmaliPrinterV2());
		if (!(model instanceof DebugModel)) {
			switchModel();
			refresh();
		}
		model.togglePosHighlight(pos);
	}

	@Override
	public Font getFont() {
		// During construction/teardown the model may be null; fall back to default.
		if (model == null || isDisposed()) {
			return super.getFont();
		}
		return model.getFont();
	}

	@Override
	public Font getFontForTokenType(int type) {
		return getFont();
	}

	private boolean shouldUseSmaliPrinterV2() {
		return getContentPanel().getTabbedPane().getMainWindow().getSettings().getSmaliAreaShowBytecode();
	}

	/**
	 * Strategy for the two display modes; subclasses fill the text area and manage
	 * any mode-specific resources (listeners, gutter icons).
	 */
	private abstract class SmaliModel {
		abstract void load();

		abstract void unload();

		Font getFont() {
			return SmaliArea.super.getFont();
		}

		Font getFontForTokenType(int type) {
			return SmaliArea.super.getFontForTokenType(type);
		}

		// No-op by default; only the debug model supports breakpoints.
		void setBreakpoint(int off) {
		}

		// No-op by default; only the debug model highlights the running line.
		void togglePosHighlight(int pos) {
		}
	}

	/** Plain smali printer using the editor theme's syntax scheme. */
	private class NormalModel extends SmaliModel {
		public NormalModel() {
			Theme theme = getContentPanel().getTabbedPane().getMainWindow().getEditorTheme();
			setSyntaxScheme(theme.scheme);
			setSyntaxEditingStyle(SYNTAX_STYLE_SMALI);
		}

		@Override
		public void load() {
			setText(getJClass().getSmali());
		}

		@Override
		public void unload() {
		}
	}

	/**
	 * Bytecode-oriented printer with debugger integration: breakpoint management
	 * (F2 / gutter clicks), line highlights, and a theme-tracking syntax scheme.
	 */
	private class DebugModel extends SmaliModel {
		private KeyStroke bpShortcut;
		private final String keyID = "set a break point";
		private Gutter gutter;
		private Object runningHighlightTag = null; // running line
		private final SmaliV2Style smaliV2Style = new SmaliV2Style(SmaliArea.this);
		// Breakpoints keyed by document line number.
		private final Map<Integer, BreakpointLine> bpMap = new HashMap<>();
		// Re-applies our style when the syntax scheme property changes (theme switch).
		private final PropertyChangeListener listener = evt -> {
			if (smaliV2Style.refreshTheme()) {
				setSyntaxScheme(smaliV2Style);
			}
		};

		public DebugModel() {
			loadV2Style();
			setSyntaxEditingStyle(SyntaxConstants.SYNTAX_STYLE_ASSEMBLER_6502);
			addPropertyChangeListener(SYNTAX_SCHEME_PROPERTY, listener);
			regBreakpointEvents();
		}

		@Override
		public void load() {
			// Lazily initialize the gutter on first load (it needs the component tree).
			if (gutter == null) {
				gutter = RSyntaxUtilities.getGutter(SmaliArea.this);
				gutter.setBookmarkingEnabled(true);
				gutter.setIconRowHeaderInheritsGutterBackground(true);
				Font baseFont = SmaliArea.super.getFont();
				gutter.setLineNumberFont(baseFont.deriveFont(baseFont.getSize2D() - 1.0f));
			}
			setText(DbgUtils.getSmaliCode(((JClass) node).getCls().getClassNode()));
			loadV2Style();
			loadBreakpoints();
		}

		@Override
		public void unload() {
			// Tear down everything registered in the constructor/load, in order.
			removePropertyChangeListener(listener);
			removeLineHighlight(runningHighlightTag);
			UiUtils.removeKeyBinding(SmaliArea.this, bpShortcut, keyID);
			BreakpointManager.removeListener((JClass) node);
			bpMap.forEach((k, v) -> {
				v.remove();
			});
		}

		@Override
		public Font getFont() {
			return smaliV2Style.getFont();
		}

		@Override
		public Font getFontForTokenType(int type) {
			return smaliV2Style.getFont();
		}

		private void loadV2Style() {
			setSyntaxScheme(smaliV2Style);
		}

		// Register the F2 shortcut and the BreakpointManager callback.
		private void regBreakpointEvents() {
			bpShortcut = KeyStroke.getKeyStroke(KeyEvent.VK_F2, 0);
			UiUtils.addKeyBinding(SmaliArea.this, bpShortcut, "set break point", new AbstractAction() {
				private static final long serialVersionUID = -1111111202103170738L;

				@Override
				public void actionPerformed(ActionEvent e) {
					setBreakpoint(getCaretPosition());
				}
			});
			BreakpointManager.addListener((JClass) node, this::setBreakpointDisabled);
		}

		private void loadBreakpoints() {
			List<Integer> posList = BreakpointManager.getPositions((JClass) node);
			for (Integer integer : posList) {
				setBreakpoint(integer);
			}
		}

		// Toggle a breakpoint at the given document offset: add one if the line has
		// none, remove it otherwise. Newly added breakpoints that the manager
		// rejects are shown disabled.
		@Override
		public void setBreakpoint(int pos) {
			int line;
			try {
				line = getLineOfOffset(pos);
			} catch (BadLocationException e) {
				LOG.error("Failed to get line by offset: {}", pos, e);
				return;
			}
			BreakpointLine bpLine = bpMap.remove(line);
			if (bpLine == null) {
				bpLine = new BreakpointLine(line);
				bpLine.setDisabled(false);
				bpMap.put(line, bpLine);
				if (!BreakpointManager.set((JClass) node, line)) {
					bpLine.setDisabled(true);
				}
			} else {
				BreakpointManager.remove((JClass) node, line);
				bpLine.remove();
			}
		}

		// Move the "currently executing" highlight to the line containing pos.
		@Override
		public void togglePosHighlight(int pos) {
			if (runningHighlightTag != null) {
				removeLineHighlight(runningHighlightTag);
			}
			try {
				int line = getLineOfOffset(pos);
				runningHighlightTag = addLineHighlight(line, DEBUG_LINE_COLOR);
			} catch (BadLocationException e) {
				LOG.error("Failed to get line by offset: {}", pos, e);
			}
		}

		private void setBreakpointDisabled(int pos) {
			try {
				int line = getLineOfOffset(pos);
				bpMap.computeIfAbsent(line, k -> new BreakpointLine(line)).setDisabled(true);
			} catch (BadLocationException e) {
				LOG.error("Failed to get line by offset: {}", pos, e);
			}
		}

		/**
		 * Syntax scheme that reuses the editor theme's colors but supplies the
		 * smali font itself (via getFont/getFontForTokenType overrides above).
		 */
		private class SmaliV2Style extends SyntaxScheme {

			Theme curTheme;

			public SmaliV2Style(SmaliArea smaliArea) {
				super(true);
				curTheme = smaliArea.getContentPanel().getTabbedPane().getMainWindow().getEditorTheme();
				updateTheme();
			}

			public Font getFont() {
				return getContentPanel().getTabbedPane().getMainWindow().getSettings().getSmaliFont();
			}

			// Returns true (and rebuilds the styles) if the editor theme changed.
			public boolean refreshTheme() {
				Theme theme = getContentPanel().getTabbedPane().getMainWindow().getEditorTheme();
				boolean refresh = theme != curTheme;
				if (refresh) {
					curTheme = theme;
					updateTheme();
				}
				return refresh;
			}

			private void updateTheme() {
				Style[] mainStyles = curTheme.scheme.getStyles();
				Style[] styles = new Style[mainStyles.length];
				for (int i = 0; i < mainStyles.length; i++) {
					Style mainStyle = mainStyles[i];
					if (mainStyle == null) {
						styles[i] = new Style();
					} else {
						// font will be hijacked by getFont & getFontForTokenType,
						// so it doesn't need to be set here.
						styles[i] = new Style(mainStyle.foreground, mainStyle.background, null);
					}
				}
				setStyles(styles);
			}

			@Override
			public void restoreDefaults(Font baseFont) {
				restoreDefaults(baseFont, true);
			}

			@Override
			public void restoreDefaults(Font baseFont, boolean fontStyles) {
				// Note: it's a hook for continue using the editor theme, better don't remove it.
			}
		}

		/**
		 * One breakpoint's UI state: a gutter icon plus (when enabled) a line
		 * highlight. "Disabled" means the breakpoint could not be set in the
		 * debugger backend.
		 */
		private class BreakpointLine {
			Object highlightTag;
			GutterIconInfo iconInfo;
			boolean disabled;
			final int line;

			BreakpointLine(int line) {
				this.line = line;
				this.disabled = true;
			}

			void remove() {
				gutter.removeTrackingIcon(iconInfo);
				if (!this.disabled) {
					removeLineHighlight(highlightTag);
				}
			}

			void setDisabled(boolean disabled) {
				// Only rebuild the icon/highlight when the state actually flips.
				if (disabled) {
					if (!this.disabled) {
						gutter.removeTrackingIcon(iconInfo);
						removeLineHighlight(highlightTag);
						try {
							iconInfo = gutter.addLineTrackingIcon(line, ICON_BREAKPOINT_DISABLED);
						} catch (BadLocationException e) {
							LOG.error("Failed to add line tracking icon", e);
						}
					}
				} else {
					if (this.disabled) {
						gutter.removeTrackingIcon(this.iconInfo);
						try {
							iconInfo = gutter.addLineTrackingIcon(line, ICON_BREAKPOINT);
							highlightTag = addLineHighlight(line, BREAKPOINT_LINE_COLOR);
						} catch (BadLocationException e) {
							LOG.error("Failed to remove line tracking icon", e);
						}
					}
				}
				this.disabled = disabled;
			}
		}
	}

	@Override
	protected RTextAreaUI createRTextAreaUI() {
		// IconRowHeader won't fire an event when people click on it for adding/removing icons,
		// so our poor breakpoints won't be set if we don't hijack IconRowHeader.
		return new RSyntaxTextAreaUI(this) {
			@Override
			public EditorKit getEditorKit(JTextComponent tc) {
				return new RSyntaxTextAreaEditorKit() {
					private static final long serialVersionUID = -1111111202103170740L;

					@Override
					public IconRowHeader createIconRowHeader(RTextArea textArea) {
						return new FoldingAwareIconRowHeader((RSyntaxTextArea) textArea) {
							private static final long serialVersionUID = -1111111202103170739L;

							@Override
							public void mousePressed(MouseEvent e) {
								// Route gutter clicks to breakpoint toggling.
								int offs = textArea.viewToModel(e.getPoint());
								if (offs > -1) {
									model.setBreakpoint(offs);
								}
							}
						};
					}
				};
			}
		};
	}
}
| skylot/jadx | jadx-gui/src/main/java/jadx/gui/ui/codearea/SmaliArea.java |
1,428 | /* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.datamgr.tree;
import javax.swing.Icon;
import generic.theme.GIcon;
import ghidra.app.plugin.core.datamgr.archive.DomainFileArchive;
import ghidra.framework.model.DomainFile;
import ghidra.framework.model.DomainObject;
import ghidra.program.model.listing.Program;
import resources.MultiIcon;
import resources.icons.TranslateIcon;
/**
 * Tree node for a data type archive backed by a project {@link DomainFile}.
 * Caches the file's version/checkout/hijack state so icon and label rendering
 * does not repeatedly query the domain file; the cache is refreshed via
 * {@link #nodeChanged()}.
 */
public abstract class DomainFileArchiveNode extends ArchiveNode {

	// Overlay icons for file state; declared final (they are constants and were
	// previously mutable static fields).
	//@formatter:off
	private static final Icon CHECKED_OUT_ICON = new GIcon("icon.plugin.datatypes.tree.node.archive.file.checked.out");
	private static final Icon CHECKED_OUT_EXCLUSIVE_ICON = new GIcon("icon.plugin.datatypes.tree.node.archive.file.checked.out.exclusive");
	private static final Icon HIJACKED_ICON = new GIcon("icon.plugin.datatypes.tree.node.archive.file.hijacked");
	private static final Icon READ_ONLY_ICON = new GIcon("icon.plugin.datatypes.tree.node.archive.file.read.only");
	private static final Icon NOT_LATEST_CHECKED_OUT_ICON = new GIcon("icon.plugin.datatypes.tree.node.archive.file.checked.out.not.latest");
	//@formatter:on

	// Cached state snapshot; see updateDomainFileInfo().
	private boolean isChanged;
	private boolean isReadOnly;
	private boolean isHijacked;
	private boolean isCheckedOut;
	private boolean isCheckedOutExclusive;
	private boolean isVersioned;
	private int version;
	private int latestVersion;
	private String domainFileInfoString;

	public DomainFileArchiveNode(DomainFileArchive archive, ArrayPointerFilterState filterState) {
		super(archive, filterState);
		updateDomainFileInfo();
	}

	/**
	 * Snapshot the domain file/object state into this node's fields and rebuild
	 * the display suffix string.
	 */
	private void updateDomainFileInfo() {
		DomainObject domainObject = ((DomainFileArchive) archive).getDomainObject();
		DomainFile domainFile = ((DomainFileArchive) archive).getDomainFile();
		isChanged = domainObject.isChanged();
		isReadOnly = domainFile.isReadOnly();
		isHijacked = domainFile.isHijacked();
		isVersioned = domainFile.isVersioned();
		version = (isVersioned || !domainFile.canSave()) ? domainFile.getVersion()
				: DomainFile.DEFAULT_VERSION;
		// Exclusive access either via an unversioned object we own, or an
		// exclusive checkout of a versioned file.
		isCheckedOutExclusive =
			(!isVersioned && domainObject.hasExclusiveAccess() && !isReadOnly) ||
				(isVersioned && domainFile.isCheckedOutExclusive());
		isCheckedOut = isCheckedOutExclusive || domainFile.isCheckedOut();
		latestVersion = domainFile.getLatestVersion();
		domainFileInfoString = createDomainFileInfoString();
	}

	/**
	 * Build the display suffix: "(hijacked)", "(v)" / "(v of latest)" for
	 * versioned files, "@ v" for read-only older versions, plus a trailing
	 * " *" for unsaved non-program changes.
	 */
	private String createDomainFileInfoString() {
		DomainObject domainObject = ((DomainFileArchive) archive).getDomainObject();
		String name = "";
		if (isHijacked) {
			name += " (hijacked)";
		}
		else if (isVersioned) {
			if (version == latestVersion && !isCheckedOut) {
				name += " (" + version + ")";
			}
			else {
				name += " (" + version + " of " + latestVersion + ")";
			}
		}
		else if (version != DomainFile.DEFAULT_VERSION) {
			name += " @ " + version;
		}
		if (!(domainObject instanceof Program) && isChanged) {
			name += " *";
		}
		return name;
	}

	@Override
	public abstract String getToolTip();

	@Override
	public boolean canDelete() {
		return false;
	}

	@Override
	public Icon getIcon(boolean expanded) {
		// Compose: versioned background + archive base icon + one state overlay.
		Icon baseIcon = archive.getIcon(expanded);
		DtBackgroundIcon bgIcon = new DtBackgroundIcon(isVersioned);
		MultiIcon multiIcon = new MultiIcon(bgIcon);
		multiIcon.addIcon(baseIcon);
		if (isReadOnly) {
			multiIcon.addIcon(new TranslateIcon(READ_ONLY_ICON, 14, 3));
		}
		else if (isHijacked) {
			multiIcon.addIcon(new TranslateIcon(HIJACKED_ICON, 8, -4));
		}
		else if (isCheckedOut) {
			if (isCheckedOutExclusive) {
				multiIcon.addIcon(new TranslateIcon(CHECKED_OUT_EXCLUSIVE_ICON, 8, -4));
			}
			else if (version < latestVersion) {
				multiIcon.addIcon(new TranslateIcon(NOT_LATEST_CHECKED_OUT_ICON, 8, -4));
			}
			else {
				multiIcon.addIcon(new TranslateIcon(CHECKED_OUT_ICON, 8, -4));
			}
		}

		// TODO: add program architecture state

		return multiIcon;
	}

	protected String getDomainObjectInfo() {
		return domainFileInfoString;
	}

	public DomainFile getDomainFile() {
		return ((DomainFileArchive) archive).getDomainFile();
	}

	@Override
	public void nodeChanged() {
		super.nodeChanged();
		updateDomainFileInfo();
	}
}
| NationalSecurityAgency/ghidra | Ghidra/Features/Base/src/main/java/ghidra/app/plugin/core/datamgr/tree/DomainFileArchiveNode.java |
1,429 | package ctrip.android.bundle.hack;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
/**
 * Created by yb.wang on 14/12/31.
 * Hack - reflection helpers that wrap reflective access in typed forms: classes,
 * methods and fields.
 */
public class Hack {
private static AssertionFailureHandler sFailureHandler;
/**
 * Callback invoked when a hack assertion fails; return value presumably
 * indicates whether the failure was handled — confirm at the call site
 * (Hack.fail), which is outside this view.
 */
public static interface AssertionFailureHandler {
	boolean onAssertionFailure(HackDeclaration.HackAssertionException hackAssertionException);
}
/**
 * Holder for {@link HackAssertionException}, the checked throwable raised (or
 * routed to the failure handler) when a reflective lookup fails.
 */
public static abstract class HackDeclaration {

	/**
	 * Carries the class/method/field that failed to resolve. Note: extends
	 * {@link Throwable} directly, so callers must catch it explicitly.
	 */
	public static class HackAssertionException extends Throwable {
		private static final long serialVersionUID = 1;
		// Details of the failed reflective lookup, filled in by the Hacked* wrappers.
		private Class<?> mHackedClass;
		private String mHackedFieldName;
		private String mHackedMethodName;

		public HackAssertionException(String str) {
			super(str);
		}

		public HackAssertionException(Exception exception) {
			super(exception);
		}

		public String toString() {
			// Prefer showing the wrapped cause when one exists.
			return getCause() != null ? getClass().getName() + ": " + getCause() : super.toString();
		}

		public Class<?> getHackedClass() {
			return this.mHackedClass;
		}

		public void setHackedClass(Class<?> cls) {
			this.mHackedClass = cls;
		}

		public String getHackedMethodName() {
			return this.mHackedMethodName;
		}

		public void setHackedMethodName(String str) {
			this.mHackedMethodName = str;
		}

		public String getHackedFieldName() {
			return this.mHackedFieldName;
		}

		public void setHackedFieldName(String str) {
			this.mHackedFieldName = str;
		}
	}
}
public static class HackedClass<C> {
protected Class<C> mClass;
public <T> HackedField<C, T> staticField(String str) throws HackDeclaration.HackAssertionException {
return new HackedField<C, T>(this.mClass, str, 8);
}
public <T> HackedField<C, T> field(String str) throws HackDeclaration.HackAssertionException {
return new HackedField(this.mClass, str, 0);
}
public HackedMethod staticMethod(String str, Class<?>... clsArr) throws HackDeclaration.HackAssertionException {
return new HackedMethod(this.mClass, str, clsArr, 8);
}
public HackedMethod method(String str, Class<?>... clsArr) throws HackDeclaration.HackAssertionException {
return new HackedMethod(this.mClass, str, clsArr, 0);
}
public HackedConstructor constructor(Class<?>... clsArr) throws HackDeclaration.HackAssertionException {
return new HackedConstructor(this.mClass, clsArr);
}
public HackedClass(Class<C> cls) {
this.mClass = cls;
}
public Class<C> getmClass() {
return this.mClass;
}
}
public static class HackedConstructor {
protected Constructor<?> mConstructor;
HackedConstructor(Class<?> cls, Class<?>[] clsArr) throws HackDeclaration.HackAssertionException {
if (cls != null) {
try {
this.mConstructor = cls.getDeclaredConstructor(clsArr);
} catch (Exception e) {
HackDeclaration.HackAssertionException hackAssertionException = new HackDeclaration.HackAssertionException(e);
hackAssertionException.setHackedClass(cls);
Hack.fail(hackAssertionException);
}
}
}
public Object getInstance(Object... objArr) throws IllegalArgumentException {
Object obj = null;
this.mConstructor.setAccessible(true);
try {
obj = this.mConstructor.newInstance(objArr);
} catch (Exception e) {
e.printStackTrace();
}
return obj;
}
}
public static class HackedField<C, T> {
private final Field mField;
public HackedField<C, T> ofGenericType(Class<?> cls) throws HackDeclaration.HackAssertionException {
if (!(this.mField == null || cls.isAssignableFrom(this.mField.getType()))) {
Hack.fail(new HackDeclaration.HackAssertionException(new ClassCastException(this.mField + " is not of type " + cls)));
}
return this;
}
public HackedField<C, T> ofType(Class<?> cls) throws HackDeclaration.HackAssertionException {
if (!(this.mField == null || cls.isAssignableFrom(this.mField.getType()))) {
Hack.fail(new HackDeclaration.HackAssertionException(new ClassCastException(this.mField + " is not of type " + cls)));
}
return this;
}
public HackedField<C, T> ofType(String str) throws HackDeclaration.HackAssertionException {
HackedField<C, T> ofType = null;
try {
ofType = ofType((Class<T>) Class.forName(str));
} catch (Exception e) {
Hack.fail(new HackDeclaration.HackAssertionException(e));
}
return ofType;
}
public T get(C c) {
try {
return (T) this.mField.get(c);
} catch (IllegalAccessException e) {
e.printStackTrace();
return null;
}
}
public void set(C c, Object obj) {
try {
this.mField.set(c, obj);
} catch (IllegalAccessException e) {
e.printStackTrace();
}
}
public void hijack(C c, Interception.InterceptionHandler<?> interceptionHandler) {
T obj = get(c);
if (obj == null) {
throw new IllegalStateException("Cannot hijack null");
}
set(c, Interception.proxy(obj, (Interception.InterceptionHandler) interceptionHandler, obj.getClass().getInterfaces()));
}
HackedField(Class<C> cls, String str, int i) throws HackDeclaration.HackAssertionException {
Field field = null;
if (cls == null) {
this.mField = null;
return;
}
try {
field = cls.getDeclaredField(str);
if (i > 0 && (field.getModifiers() & i) != i) {
Hack.fail(new HackDeclaration.HackAssertionException(field + " does not match modifiers: " + i));
}
field.setAccessible(true);
} catch (Exception e) {
HackDeclaration.HackAssertionException hackAssertionException = new HackDeclaration.HackAssertionException(e);
hackAssertionException.setHackedClass(cls);
hackAssertionException.setHackedFieldName(str);
Hack.fail(hackAssertionException);
} finally {
this.mField = field;
}
}
public Field getField() {
return this.mField;
}
}
public static class HackedMethod {
protected final Method mMethod;
public Object invoke(Object obj, Object... objArr) throws IllegalArgumentException, InvocationTargetException {
Object obj2 = null;
try {
obj2 = this.mMethod.invoke(obj, objArr);
} catch (IllegalAccessException e) {
e.printStackTrace();
}
return obj2;
}
HackedMethod(Class<?> cls, String str, Class<?>[] clsArr, int i) throws HackDeclaration.HackAssertionException {
Method method = null;
if (cls == null) {
this.mMethod = null;
return;
}
try {
method = cls.getDeclaredMethod(str, clsArr);
if (i > 0 && (method.getModifiers() & i) != i) {
Hack.fail(new HackDeclaration.HackAssertionException(method + " does not match modifiers: " + i));
}
method.setAccessible(true);
} catch (Exception e) {
HackDeclaration.HackAssertionException hackAssertionException = new HackDeclaration.HackAssertionException(e);
hackAssertionException.setHackedClass(cls);
hackAssertionException.setHackedMethodName(str);
Hack.fail(hackAssertionException);
} finally {
this.mMethod = method;
}
}
public Method getMethod() {
return this.mMethod;
}
}
public static <T> HackedClass<T> into(Class<T> cls) {
return new HackedClass(cls);
}
public static <T> HackedClass<T> into(String str) throws HackDeclaration.HackAssertionException {
try {
return new HackedClass(Class.forName(str));
} catch (Exception e) {
fail(new HackDeclaration.HackAssertionException(e));
return new HackedClass(null);
}
}
private static void fail(HackDeclaration.HackAssertionException hackAssertionException) throws HackDeclaration.HackAssertionException {
if (sFailureHandler == null || !sFailureHandler.onAssertionFailure(hackAssertionException)) {
throw hackAssertionException;
}
}
public static void setAssertionFailureHandler(AssertionFailureHandler assertionFailureHandler) {
sFailureHandler = assertionFailureHandler;
}
private Hack() {
}
}
| CtripMobile/DynamicAPK | bundle/src/ctrip/android/bundle/hack/Hack.java |
package hashing;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Created by gouthamvidyapradhan on 10/04/2018. A string S of lowercase letters is given. We want
* to partition this string into as many parts as possible so that each letter appears in at most
* one part, and return a list of integers representing the size of these parts.
*
* <p>Example 1: Input: S = "ababcbacadefegdehijhklij" Output: [9,7,8] Explanation: The partition is
* "ababcbaca", "defegde", "hijhklij". This is a partition so that each letter appears in at most
* one part. A partition like "ababcbacadefegde", "hijhklij" is incorrect, because it splits S into
* less parts. Note:
*
* <p>S will have length in range [1, 500]. S will consist of lowercase letters ('a' to 'z') only.
*
* <p>Solution O(n): Maintain a hashmap index of last occurrence of a character and do a linear
* check for max index, get the length and add it to the result set.
*/
public class PartitionLabels {

  /**
   * Main method - simple smoke test.
   *
   * @param args unused
   * @throws Exception never thrown; kept for signature compatibility
   */
  public static void main(String[] args) throws Exception {
    System.out.println(new PartitionLabels().partitionLabels("abc"));
  }

  /**
   * Partitions S into as many parts as possible so that each letter appears in at
   * most one part, returning the sizes of those parts in order.
   *
   * <p>O(n) time, O(1) extra space: a 26-entry table (the input is 'a'..'z' only)
   * records each letter's last occurrence; a forward sweep then closes a partition
   * whenever the current index reaches the furthest last-occurrence seen so far.
   *
   * @param S input string of lowercase letters ('a' to 'z'); null/blank yields empty list
   * @return sizes of the partitions
   */
  public List<Integer> partitionLabels(String S) {
    if (S == null || S.trim().isEmpty()) return new ArrayList<>();
    // last occurrence index of each letter; a forward pass naturally keeps the largest index
    int[] last = new int[26];
    for (int i = 0; i < S.length(); i++) {
      last[S.charAt(i) - 'a'] = i;
    }
    List<Integer> result = new ArrayList<>();
    int start = 0; // first index of the current partition
    int max = 0;   // furthest last-occurrence among letters seen in the current partition
    for (int i = 0; i < S.length(); i++) {
      max = Math.max(max, last[S.charAt(i) - 'a']);
      if (i == max) {
        // no letter in [start, i] occurs later, so the partition can be closed here
        result.add(i - start + 1);
        start = i + 1;
      }
    }
    return result;
  }
}
| gouthampradhan/leetcode | src/main/java/hashing/PartitionLabels.java |
//TinyURL is a URL shortening service where you enter a URL such as https://leetcode.com/problems/design-tinyurl
//and it returns a short URL such as http://tinyurl.com/4e9iAk.
//
//Design the encode and decode methods for the TinyURL service. There is no restriction on how your
//encode/decode algorithm should work. You just need to ensure that a URL can be encoded to a tiny URL
//and the tiny URL can be decoded to the original URL.
public class EncodeAndDecodeTinyURL {
HashMap<String, String> map = new HashMap<String, String>();
String characters = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
int count = 1;
public String getKey() {
String key = "";
while(count > 0) {
count--;
key += characters.charAt(count);
count /= characters.length();
}
return key;
}
// Encodes a URL to a shortened URL.
public String encode(String longUrl) {
String key = getKey();
map.put(key, longUrl);
count++;
return "http://tinyurl.com/" + key;
}
// Decodes a shortened URL to its original URL.
public String decode(String shortUrl) {
return map.get(shortUrl.replace("http://tinyurl.com/", ""));
}
}
// Your Codec object will be instantiated and called as such:
// Codec codec = new Codec();
// codec.decode(codec.encode(url));
| kdn251/interviews | company/google/EncodeAndDecodeTinyURL.java |
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.framework.data;
import java.awt.*;
import java.io.*;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.swing.Icon;
import org.apache.commons.lang3.StringUtils;
import db.DBHandle;
import db.Field;
import db.buffers.*;
import generic.theme.GColor;
import generic.theme.GIcon;
import ghidra.framework.client.*;
import ghidra.framework.model.*;
import ghidra.framework.protocol.ghidra.GhidraURL;
import ghidra.framework.store.*;
import ghidra.framework.store.FileSystem;
import ghidra.framework.store.local.LocalFileSystem;
import ghidra.framework.store.local.LocalFolderItem;
import ghidra.util.*;
import ghidra.util.exception.*;
import ghidra.util.task.TaskMonitor;
import resources.MultiIcon;
import resources.icons.TranslateIcon;
/**
* {@link GhidraFileData} provides the managed object which represents a project file that
* corresponds to matched {@link FolderItem} pair across both a versioned and private
* filesystem and viewed as a single file at the project level. This class closely mirrors the
* {@link DomainFile} interface and is used by the {@link GhidraFile} implementation; both of which
* represent immutable file references. Changes made to this file's name or path are not reflected
* in old {@link DomainFile} instances and must be re-instantiated following such a change.
* Any long-term retention of {@link DomainFolder} and {@link DomainFile} instances requires an
* appropriate change listener to properly discard/reacquire such instances.
*/
public class GhidraFileData {
static final int ICON_WIDTH = 18;
static final int ICON_HEIGHT = 17;
private static final boolean ALWAYS_MERGE = System.getProperty("ForceMerge") != null;
//@formatter:off
public static final Icon UNSUPPORTED_FILE_ICON = new GIcon("icon.project.data.file.ghidra.unsupported");
public static final Icon CHECKED_OUT_ICON = new GIcon("icon.project.data.file.ghidra.checked.out");
public static final Icon CHECKED_OUT_EXCLUSIVE_ICON = new GIcon("icon.project.data.file.ghidra.checked.out.exclusive");
public static final Icon HIJACKED_ICON = new GIcon("icon.project.data.file.ghidra.hijacked");
public static final Icon VERSION_ICON = new VersionIcon();
public static final Icon READ_ONLY_ICON = new GIcon("icon.project.data.file.ghidra.read.only");
public static final Icon NOT_LATEST_CHECKED_OUT_ICON = new GIcon("icon.project.data.file.ghidra.not.latest");
//@formatter:on
private DefaultProjectData projectData;
private LocalFileSystem fileSystem;
private FileSystem versionedFileSystem;
private DomainFolderChangeListener listener;
private GhidraFolderData parent;
private String name;
private String fileID;
private LocalFolderItem folderItem;
private FolderItem versionedFolderItem;
private Icon icon;
private Icon disabledIcon;
private AtomicBoolean busy = new AtomicBoolean();
// TODO: Many of the old methods assumed that the state was up-to-date due to
// refreshing ... we are relying on non-refreshed data to be dropped from cache map and no
// longer used.
/**
 * Construct a file instance with a specified name and a corresponding parent folder
 * @param parent parent folder
 * @param name file name
 * @throws IOException if an IO error occurs, or neither a private nor a versioned
 * folder item exists for the given name
 */
GhidraFileData(GhidraFolderData parent, String name) throws IOException {
	this.parent = parent;
	this.name = name;
	this.projectData = parent.getProjectData();
	this.fileSystem = parent.getLocalFileSystem();
	this.versionedFileSystem = parent.getVersionedFileSystem();
	this.listener = parent.getChangeListener();
	// resolves the underlying folder item(s) and establishes the file-ID
	refresh();
}
/**
 * Construct a new file instance with a specified name and a corresponding parent folder using
 * up-to-date folder items (no filesystem refresh is performed).
 * @param parent parent folder
 * @param name file name
 * @param folderItem local folder item
 * @param versionedFolderItem versioned folder item
 */
GhidraFileData(GhidraFolderData parent, String name, LocalFolderItem folderItem,
		FolderItem versionedFolderItem) {
	this.parent = parent;
	this.name = name;
	this.folderItem = folderItem;
	this.versionedFolderItem = versionedFolderItem;
	this.projectData = parent.getProjectData();
	this.fileSystem = parent.getLocalFileSystem();
	this.versionedFileSystem = parent.getVersionedFileSystem();
	this.listener = parent.getChangeListener();
	// discard any stale checkout state, then establish the file-ID
	validateCheckout();
	updateFileID();
}
/**
 * Update this file's cached state using already-refreshed folder items and notify
 * the change listener when the parent folder has been visited.
 * @param localFolderItem up-to-date private (local) folder item or null
 * @param verFolderItem up-to-date versioned folder item or null
 */
void refresh(LocalFolderItem localFolderItem, FolderItem verFolderItem) {
	// invalidate cached icons so they are rebuilt on next request
	icon = null;
	disabledIcon = null;
	this.folderItem = localFolderItem;
	this.versionedFolderItem = verFolderItem;
	validateCheckout();
	boolean fileIDset = updateFileID();
	if (parent.visited()) {
		// NOTE: we should maintain some cached data so we can determine if something really changed
		listener.domainFileStatusChanged(getDomainFile(), fileIDset);
	}
}
/**
 * Re-fetch the private and versioned folder items for this file from their
 * respective filesystems. Versioned-side IO failures are silently ignored
 * (e.g., repository connection issues).
 * @return true if the file-ID was newly established by this refresh
 * @throws FileNotFoundException if neither a private nor a versioned item exists
 * @throws IOException if an IO error occurs
 */
private boolean refresh() throws IOException {
	String parentPath = parent.getPathname();
	if (folderItem == null) {
		folderItem = fileSystem.getItem(parentPath, name);
	}
	else {
		folderItem = folderItem.refresh();
	}
	if (versionedFileSystem.isOnline()) {
		try {
			if (versionedFolderItem == null) {
				versionedFolderItem = versionedFileSystem.getItem(parentPath, name);
			}
			else {
				versionedFolderItem = versionedFolderItem.refresh();
			}
			validateCheckout();
		}
		catch (IOException e) {
			// ignore
		}
	}
	if (folderItem == null && versionedFolderItem == null) {
		throw new FileNotFoundException(name + " not found");
	}
	return updateFileID();
}
/**
 * Update the cached file-ID, preferring the private folder item when present.
 * @return true if the file-ID transitioned from null to a non-null value
 */
private boolean updateFileID() {
	boolean fileIdWasNull = fileID == null;
	fileID = folderItem != null ? folderItem.getFileID() : versionedFolderItem.getFileID();
	return fileIdWasNull && fileID != null;
}
/**
 * Notification callback that this file's status may have changed
 * @throws IOException if IO error occurs during refresh
 */
void statusChanged() throws IOException {
	// no prior knowledge of a file-ID change; refresh will detect one
	statusChanged(false);
}
/**
 * Refresh state from the filesystems and notify the change listener when the parent
 * folder has been visited.
 * @param fileIDset true if the file-ID is already known to have been newly established
 * @throws IOException if IO error occurs during refresh
 */
private void statusChanged(boolean fileIDset) throws IOException {
	// invalidate cached icons so they are rebuilt on next request
	icon = null;
	disabledIcon = null;
	fileIDset |= refresh();
	if (parent.visited()) {
		// NOTE: we should maintain some cached data so we can determine if something really changed
		listener.domainFileStatusChanged(getDomainFile(), fileIDset);
	}
}
/**
 * Clear a stale checkout on the private folder item when the versioned filesystem
 * no longer has a corresponding checkout record. Does nothing when the project is
 * read-only or the versioned filesystem is offline; IO errors are ignored.
 */
private void validateCheckout() {
	if (fileSystem.isReadOnly() || !versionedFileSystem.isOnline()) {
		return;
	}
	try {
		if (folderItem != null && folderItem.isCheckedOut()) {
			// Cleanup checkout status which may be stale
			if (versionedFolderItem != null) {
				ItemCheckoutStatus coStatus =
					versionedFolderItem.getCheckout(folderItem.getCheckoutId());
				if (coStatus == null) {
					folderItem.clearCheckout();
				}
			}
			else {
				folderItem.clearCheckout();
			}
		}
	}
	catch (IOException e) {
		// ignore
	}
}
/**
 * Perform file in-use / busy check
 * @throws FileInUseException if file is in-use or busy
 */
void checkInUse() throws FileInUseException {
	synchronized (fileSystem) {
		// busy covers an in-progress versioning operation; an opened domain object means in-use
		if (busy.get() || getOpenedDomainObject() != null) {
			throw new FileInUseException(name + " is in use");
		}
	}
}
/**
 * Returns true if a versioning operation is in progress, or the domain object for
 * this file is open and cannot currently be locked (i.e., has an open transaction).
 * @return true if busy
 */
boolean isBusy() {
	if (busy.get()) {
		return true;
	}
	DomainObjectAdapter openedObject = getOpenedDomainObject();
	if (openedObject == null) {
		return false;
	}
	return !openedObject.canLock();
}
/**
 * Removes this file from the file-index maintained by the {@link ProjectData} instance
 * following its removal from the project.
 */
void dispose() {
	projectData.removeFromIndex(fileID);
	// NOTE: clearing the following can cause issues since there may be some residual
	// activity/use which will get a NPE
	// parent = null;
	// projectData = null;
	// listener = null;
}
/**
 * Returns the unique file-ID if one has been established or null
 * @return the file-ID or null if failed to obtain ID
 */
String getFileID() {
	return fileID;
}
/**
 * Returns the full path name to the domain object within the project.
 * @return the path name
 */
String getPathname() {
	StringBuilder buf = new StringBuilder(parent.getPathname());
	// the parent path already ends with the separator when it is the root folder
	if (buf.length() != FileSystem.SEPARATOR.length()) {
		buf.append(FileSystem.SEPARATOR);
	}
	return buf.append(name).toString();
}
/**
 * Get the name of this project file (no path component)
 * @return the name
 */
String getName() {
	return name;
}
/**
 * Get the parent folder data for this file.
 * @return the parent
 */
GhidraFolderData getParent() {
	return parent;
}
/**
 * @return {@link DomainFile} instance which corresponds to this file.
 * A new immutable instance is created on each call.
 */
GhidraFile getDomainFile() {
	return new GhidraFile(parent.getDomainFolder(), name);
}
/**
 * Get a remote Ghidra URL for this domain file if available within a remote repository.
 * @param ref reference within a file, may be null. NOTE: such reference interpretation
 * is specific to a domain object and tooling with limited support.
 * @return remote Ghidra URL for this file or null (e.g., file not versioned, no
 * repository, or resulting URL malformed)
 */
URL getSharedProjectURL(String ref) {
	synchronized (fileSystem) {
		RepositoryAdapter repository = projectData.getRepository();
		if (versionedFolderItem != null && repository != null) {
			URL folderURL = parent.getDomainFolder().getSharedProjectURL();
			try {
				String spec = name;
				if (!StringUtils.isEmpty(ref)) {
					// append fragment reference to the file name portion
					spec += "#" + ref;
				}
				return new URL(folderURL, spec);
			}
			catch (MalformedURLException e) {
				// ignore
			}
		}
		return null;
	}
}
/**
 * Get a local Ghidra URL for this domain file if available within a non-transient local
 * project. A null value is returned for a transient project.
 * @param ref reference within a file, may be null. NOTE: such reference interpretation
 * is specific to a domain object and tooling with limited support.
 * @return local Ghidra URL for this file or null if transient or not applicable
 */
URL getLocalProjectURL(String ref) {
	synchronized (fileSystem) {
		ProjectLocator projectLocator = parent.getProjectLocator();
		if (!projectLocator.isTransient()) {
			return GhidraURL.makeURL(projectLocator, getPathname(), ref);
		}
		return null;
	}
}
/**
 * Reassign a new file-ID to resolve file-ID conflict.
 * Conflicts can occur as a result of a cancelled check-out.
 * @throws IOException if an IO error occurs or the file is versioned/checked-out
 */
void resetFileID() throws IOException {
	synchronized (fileSystem) {
		if (versionedFolderItem != null || isCheckedOut()) {
			throw new IOException("File ID reset not permitted on versioned file");
		}
		if (folderItem != null) {
			fileID = folderItem.resetFileID();
		}
	}
}
/**
 * Set the name on this file.
 * @param newName domain file name
 * @return renamed domain file (older DomainFile instances become invalid since they are immutable)
 * @throws InvalidNameException if newName contains illegal characters
 * @throws DuplicateFileException if a file named newName
 * already exists in this file's domain folder.
 * @throws FileInUseException if this file is in-use / checked-out.
 * @throws IOException if an IO or access error occurs.
 */
GhidraFile setName(String newName) throws InvalidNameException, IOException {
	synchronized (fileSystem) {
		if (fileSystem.isReadOnly()) {
			throw new ReadOnlyException("setName permitted within writeable project only");
		}
		checkInUse();
		if (parent.containsFile(newName)) {
			throw new DuplicateFileException("File named " + newName + " already exists.");
		}
		String oldName = name;
		String folderPath = parent.getPathname();
		if (isHijacked()) {
			// hijack case: rename only the private item; the versioned item keeps the old name
			fileSystem.moveItem(folderPath, name, folderPath, newName);
			folderItem = null;
			parent.fileChanged(name);
			parent.fileChanged(newName);
			return parent.getDomainFile(newName);
		}
		if (versionedFolderItem == null) {
			// private-only file: rename on the local filesystem unless checked-out
			if (!isCheckedOut()) {
				fileSystem.moveItem(folderPath, name, folderPath, newName);
				folderItem = fileSystem.getItem(folderPath, newName);
			}
			else {
				throw new FileInUseException(name + " is checked-out");
			}
		}
		else {
			// versioned file: rename occurs on the versioned filesystem
			versionedFileSystem.moveItem(folderPath, name, folderPath, newName);
			versionedFolderItem = versionedFileSystem.getItem(folderPath, newName);
		}
		name = newName;
		parent.fileRenamed(oldName, newName);
		return parent.getDomainFile(newName);
	}
}
/**
 * Returns content-type string for this file
 * @return the file content type or a reserved content type {@link ContentHandler#MISSING_CONTENT}
 * or {@link ContentHandler#UNKNOWN_CONTENT}.
 */
String getContentType() {
	synchronized (fileSystem) {
		FolderItem item = (folderItem == null) ? versionedFolderItem : folderItem;
		if (item == null) {
			// this can happen when we are trying to load a version file from
			// a server to which we are not connected
			return ContentHandler.MISSING_CONTENT;
		}
		String contentType = item.getContentType();
		if (contentType == null) {
			return ContentHandler.UNKNOWN_CONTENT;
		}
		return contentType;
	}
}
/**
 * Get content handler for this file based upon the item's content type
 * @return content handler
 * @throws IOException if an IO error occurs, file not found, or unsupported content
 */
ContentHandler<?> getContentHandler() throws IOException {
	synchronized (fileSystem) {
		FolderItem item = folderItem != null ? folderItem : versionedFolderItem;
		// this can happen when we are trying to load a version file from
		// a server to which we are not connected
		if (item == null) {
			throw new FileNotFoundException(name + " not found");
		}
		return DomainObjectAdapter.getContentHandler(item.getContentType());
	}
}
/**
 * Returns the underlying Class for the domain object in this domain file.
 * @return the class, or the generic {@link DomainObject} class when no content
 * handler is available (never null)
 */
Class<? extends DomainObject> getDomainObjectClass() {
	synchronized (fileSystem) {
		try {
			return getContentHandler().getDomainObjectClass();
		}
		catch (IOException e) {
			// ignore missing content handler
		}
		return DomainObject.class;
	}
}
/**
 * Returns changes made to versioned file by others since checkout was performed.
 * NOTE: This method is unable to cope with version issues which may require an
 * upgrade.
 * @return change set or null if this file is not a checked-out versioned file
 * @throws VersionException latest version was created with a different version of software
 * which prevents rapid determination of change set.
 * @throws IOException if a folder item access error occurs or change set was
 * produced by newer version of software and can not be read
 */
ChangeSet getChangesByOthersSinceCheckout() throws VersionException, IOException {
	synchronized (fileSystem) {
		if (versionedFolderItem != null && folderItem != null && folderItem.isCheckedOut()) {
			// diff from the version that was checked-out up to the current version
			return getContentHandler().getChangeSet(versionedFolderItem,
				folderItem.getCheckoutVersion(), versionedFolderItem.getCurrentVersion());
		}
		return null;
	}
}
/**
 * Returns the domainObject for this DomainFile only if it is already open.
 * @return the already opened domainObject or null if it is not currently open.
 */
private DomainObjectAdapter getOpenedDomainObject() {
	return projectData.getOpenedDomainObject(getPathname());
}
/**
 * Opens and returns the current domain object. If the domain object is already opened,
 * then the existing open domain object is returned.
 * @param consumer consumer of the domain object which is responsible for
 * releasing it after use. When all the consumers using the domain object release it, then
 * the object is closed and its resources released.
 * @param okToUpgrade if true, allows the system to upgrade out of data domain objects to
 * be in compliance with the current version of Ghidra. A Version exception will be thrown
 * if the domain object cannot be upgraded OR okToUpgrade is false and the domain object is
 * out of date.
 * @param okToRecover if true, allows the system to recover unsaved file changes which
 * resulted from a crash. If false, any existing recovery data will be deleted.
 * This flag is only relevant if project is open for update (isInProject) and the file can be
 * opened for update.
 * @param monitor permits monitoring of open progress.
 * @return an open domain object can be modified and saved. (Not read-only)
 * @throws VersionException if the domain object could not be read due
 * to a version format change. If okToUpgrade is true, then a VersionException indicates
 * that the domain object cannot be upgraded to the current format. If okToUpgrade is false,
 * then the VersionException only means the object is not in the current format - it
 * may or may not be possible to upgrade.
 * @throws IOException if an IO or access error occurs.
 * @throws CancelledException if monitor cancelled operation
 */
DomainObject getDomainObject(Object consumer, boolean okToUpgrade, boolean okToRecover,
		TaskMonitor monitor) throws VersionException, IOException, CancelledException {
	// Don't allow this call while versioning operation is on-going
	if (busy.get()) {
		throw new FileInUseException("Cannot open during versioning operation");
	}
	FolderItem myFolderItem;
	DomainObjectAdapter domainObj = null;
	synchronized (fileSystem) {
		// read-only projects and link files can only provide read-only objects
		if (fileSystem.isReadOnly() || isLinkFile()) {
			return getReadOnlyDomainObject(consumer, DomainFile.DEFAULT_VERSION, monitor);
		}
		domainObj = getOpenedDomainObject();
		if (domainObj != null) {
			if (!domainObj.addConsumer(consumer)) {
				// object was closing concurrently; drop it and open fresh below
				domainObj = null;
				projectData.clearDomainObject(getPathname());
			}
			else {
				return domainObj;
			}
		}
		ContentHandler<?> contentHandler = getContentHandler();
		if (folderItem == null) {
			// no private item: serve a read-only object backed by the versioned item
			// via a proxy file (cannot be saved back)
			DomainObjectAdapter doa = contentHandler.getReadOnlyObject(versionedFolderItem,
				DomainFile.DEFAULT_VERSION, true, consumer, monitor);
			doa.setChanged(false);
			DomainFileProxy proxy = new DomainFileProxy(name, parent.getPathname(), doa,
				DomainFile.DEFAULT_VERSION, fileID, parent.getProjectLocator());
			proxy.setLastModified(getLastModifiedTime());
			return doa;
		}
		myFolderItem = folderItem;
		domainObj = contentHandler.getDomainObject(myFolderItem, parent.getUserFileSystem(),
			FolderItem.DEFAULT_CHECKOUT_ID, okToUpgrade, okToRecover, consumer, monitor);
		projectData.setDomainObject(getPathname(), domainObj);
		// Notify file manager of in-use domain object.
		// A link-file object is indirect with tracking initiated by the URL-referenced file.
		if (!isLinkFile()) {
			projectData.trackDomainFileInUse(domainObj);
		}
	}
	// Set domain file for newly opened domain object
	// NOTE: Some domain object implementations may throw RuntimeExceptions
	// so cleanup is required in those cases
	try {
		domainObj.setDomainFile(getDomainFile());
	}
	catch (Exception e) {
		// cleanup and re-throw as IOException/VersionException as appropriate
		domainObj.release(consumer);
		projectData.clearDomainObject(getPathname());
		// generate IOException
		Throwable cause = e.getCause();
		if (cause == null) {
			cause = e;
		}
		if (cause instanceof IOException) {
			throw (IOException) cause;
		}
		else if (cause instanceof VersionException) {
			throw (VersionException) cause;
		}
		throw new IOException(e.getMessage(), e);
	}
	listener.domainFileObjectOpenedForUpdate(domainObj.getDomainFile(), domainObj);
	return domainObj;
}
/**
 * Returns a "read-only" version of the domain object. "Read-only" means that the domain
 * object cannot be saved back into its original domain object. It can still be modified
 * and saved to a new domain file. The domain object will be assigned a temporary domain
 * file that will not allow a "save" operation. The user must do a "save as"
 * to a new filename.
 * @param consumer consumer of the domain object which is responsible for
 * releasing it after use.
 * @param version the domain object version requested. DEFAULT_VERSION should be
 * specified to open the current version.
 * @param monitor permits monitoring of open progress.
 * @return a new domain object that is disassociated from its original domain file.
 * @throws VersionException if the domain object could not be read due
 * to a version format change.
 * @throws FileNotFoundException if the stored file/version was not found.
 * @throws IOException if an IO or access error occurs.
 * @throws CancelledException if monitor cancelled operation
 */
DomainObject getReadOnlyDomainObject(Object consumer, int version, TaskMonitor monitor)
		throws VersionException, IOException, CancelledException {
	synchronized (fileSystem) {
		// the private item only satisfies a request for the current (default) version
		FolderItem item =
			(folderItem != null && version == DomainFile.DEFAULT_VERSION) ? folderItem
					: versionedFolderItem;
		DomainObjectAdapter doa =
			getContentHandler().getReadOnlyObject(item, version, true, consumer, monitor);
		doa.setChanged(false);
		// Notify file manager of in-use domain object.
		// A link-file object is indirect with tracking initiated by the URL-referenced file.
		if (!isLinkFile()) {
			projectData.trackDomainFileInUse(doa);
		}
		// proxy file disallows "save"; user must "save as" to a new file
		DomainFileProxy proxy = new DomainFileProxy(name, getParent().getPathname(), doa,
			version, fileID, parent.getProjectLocator());
		proxy.setLastModified(getLastModifiedTime());
		return doa;
	}
}
/**
 * Returns a new DomainObject that cannot be changed or saved to its original file.
 * NOTE: The use of this method should generally be avoided since it can't
 * handle version changes that may have occurred and require a data upgrade
 * (e.g., DB schema change).
 * @param consumer consumer of the domain object which is responsible for
 * releasing it after use.
 * @param version the domain object version requested. DEFAULT_VERSION should be
 * specified to open the current version.
 * @param monitor permits monitoring of open progress.
 * @return a new domain object that is disassociated from its original domain file
 * and cannot be modified
 * @throws VersionException if the domain object could not be read due
 * to a version format change.
 * @throws FileNotFoundException if the stored file/version was not found.
 * @throws IOException if an IO or access error occurs.
 * @throws CancelledException if monitor cancelled operation
 */
DomainObject getImmutableDomainObject(Object consumer, int version, TaskMonitor monitor)
		throws VersionException, IOException, CancelledException {
	synchronized (fileSystem) {
		DomainObjectAdapter obj = null;
		ContentHandler<?> contentHandler = getContentHandler();
		// use the private item for the current version, hijacks, or when no versioned
		// item exists; otherwise read the requested version from the versioned item
		if (versionedFolderItem == null ||
			(version == DomainFile.DEFAULT_VERSION && folderItem != null) || isHijacked()) {
			obj = contentHandler.getImmutableObject(folderItem, consumer, version, -1, monitor);
		}
		else {
			obj = contentHandler.getImmutableObject(versionedFolderItem, consumer, version, -1,
				monitor);
		}
		// Notify file manager of in-use domain object.
		// A link-file object is indirect with tracking initiated by the URL-referenced file.
		if (!isLinkFile()) {
			projectData.trackDomainFileInUse(obj);
		}
		DomainFileProxy proxy = new DomainFileProxy(name, getParent().getPathname(), obj,
			version, fileID, parent.getProjectLocator());
		proxy.setLastModified(getLastModifiedTime());
		return obj;
	}
}
/**
* Prior to invoking getDomainObject, this method can be used to determine if
* unsaved changes can be recovered on the next open.
* @return true if recovery data exists.
*/
boolean canRecover() {
synchronized (fileSystem) {
DomainObjectAdapter dobj = getOpenedDomainObject();
if (!fileSystem.isReadOnly() && folderItem != null && dobj == null) {
return folderItem.canRecover();
}
return false;
}
}
	/**
	 * If the file has an updatable domain object with unsaved changes, generate a recovery
	 * snapshot.
	 * @return true if snapshot successful or not needed, false if file is busy which prevents
	 * snapshot, or snapshot was cancelled.
	 * @throws IOException if there is an exception saving the snapshot
	 */
	boolean takeRecoverySnapshot() throws IOException {
		if (fileSystem.isReadOnly()) {
			// snapshots are not applicable to a read-only project
			return true;
		}
		DomainObjectAdapter dobj = projectData.getOpenedDomainObject(getPathname());
		if (!(dobj instanceof DomainObjectAdapterDB) || !dobj.isChanged()) {
			// not an open DB object, or no unsaved changes - nothing to snapshot
			return true;
		}
		LockingTaskMonitor monitor = null;
		DomainObjectAdapterDB dbObjDB = (DomainObjectAdapterDB) dobj;
		if (busy.getAndSet(true)) {
			return false; // snapshot must be postponed
		}
		try {
			monitor = dbObjDB.lockForSnapshot(true, "Recovery Snapshot Task");
			if (monitor == null) {
				// snapshot lock not granted - treat as not needed
				return true;
			}
			monitor.setMessage(getName());
			return dbObjDB.getDBHandle().takeRecoverySnapshot(dbObjDB.getChangeSet(), monitor);
		}
		catch (CancelledException e) {
			// user cancelled snapshot generation
			return false;
		}
		finally {
			busy.set(false);
			if (monitor != null) {
				monitor.releaseLock(); // releases lock
			}
		}
	}
/**
* Get a long value representing the time when the data was last modified.
* @return the time
*/
long getLastModifiedTime() {
synchronized (fileSystem) {
if (folderItem != null) {
return folderItem.lastModified();
}
if (versionedFolderItem != null) {
return versionedFolderItem.lastModified();
}
return 0;
}
}
/**
* Get the state based Icon image for the domain file based upon its content class.
* @param disabled true if the icon return should be rendered as
* not enabled
* @return image icon
*/
Icon getIcon(boolean disabled) {
if (disabled) {
if (disabledIcon == null) {
disabledIcon = generateIcon(true);
}
return disabledIcon;
}
if (icon == null) {
icon = generateIcon(false);
}
return icon;
}
	/**
	 * Build the decorated icon for this file.  Overlays reflect versioned,
	 * hijacked, checked-out, read-only and link-file states.
	 * @param disabled true if the icon should be rendered as not enabled
	 * @return generated icon
	 */
	private Icon generateIcon(boolean disabled) {
		if (parent == null) {
			// instance has been disposed
			return DomainFile.UNSUPPORTED_FILE_ICON;
		}
		synchronized (fileSystem) {
			boolean isLink = isLinkFile();
			FolderItem item = folderItem != null ? folderItem : versionedFolderItem;
			// shift base icon by (1,1) to leave room for overlay decorations
			Icon baseIcon = new TranslateIcon(getBaseIcon(item), 1, 1);
			if (versionedFolderItem != null) {
				MultiIcon multiIcon = new MultiIcon(VERSION_ICON, disabled);
				multiIcon.addIcon(baseIcon);
				if (isHijacked()) {
					multiIcon.addIcon(HIJACKED_ICON);
				}
				else if (isCheckedOut()) {
					if (isCheckedOutExclusive()) {
						multiIcon.addIcon(CHECKED_OUT_EXCLUSIVE_ICON);
					}
					else {
						if (getVersion() == getLatestVersion()) {
							multiIcon.addIcon(CHECKED_OUT_ICON);
						}
						else {
							// checked-out version is older than the latest version
							multiIcon.addIcon(NOT_LATEST_CHECKED_OUT_ICON);
						}
					}
				}
				if (isLink) {
					multiIcon.addIcon(new TranslateIcon(LinkHandler.LINK_ICON, 0, 1));
				}
				return multiIcon;
			}
			else if (folderItem != null) {
				MultiIcon multiIcon = new MultiIcon(baseIcon, disabled, ICON_WIDTH, ICON_HEIGHT);
				// read-only overlay only shown within a writeable project
				if (isReadOnly() && !fileSystem.isReadOnly()) {
					multiIcon.addIcon(new TranslateIcon(READ_ONLY_ICON, 8, 9));
				}
				if (isCheckedOut()) {
					if (isCheckedOutExclusive()) {
						multiIcon.addIcon(CHECKED_OUT_EXCLUSIVE_ICON);
					}
					else {
						multiIcon.addIcon(CHECKED_OUT_ICON);
					}
				}
				if (isLink) {
					multiIcon.addIcon(new TranslateIcon(LinkHandler.LINK_ICON, 0, 1));
				}
				return multiIcon;
			}
		}
		// neither a local nor a versioned item exists
		return DomainFile.UNSUPPORTED_FILE_ICON;
	}
private Icon getBaseIcon(FolderItem item) {
try {
return getContentHandler().getIcon();
}
catch (IOException e) {
// ignore missing content handler
}
return DomainFile.UNSUPPORTED_FILE_ICON;
}
/**
* Return whether the domain object in this domain file has changed.
* @return true if changed
*/
boolean isChanged() {
DomainObjectAdapter dobj = getOpenedDomainObject();
return dobj != null && dobj.isChanged();
}
/**
* Returns true if this is a checked-out file.
* @return true if checked-out
*/
boolean isCheckedOut() {
synchronized (fileSystem) {
return folderItem != null && folderItem.isCheckedOut();
}
}
/**
* Returns true if this a checked-out file with exclusive access.
* @return true if checked-out exclusively
*/
boolean isCheckedOutExclusive() {
synchronized (fileSystem) {
if (folderItem == null) {
return false;
}
if (folderItem.isCheckedOutExclusive()) {
return true;
}
// All checkouts for non-shared versioning are treated as exclusive
return !versionedFileSystem.isShared() && folderItem.isCheckedOut();
}
}
/**
* Returns true if this is a checked-out file which has been modified since it was checked-out.
* @return true if modified since check-out
*/
boolean modifiedSinceCheckout() {
synchronized (fileSystem) {
return isCheckedOut() &&
folderItem.getCurrentVersion() != folderItem.getLocalCheckoutVersion();
}
}
/**
* Returns whether this file is explicitly marked as read-only. This method is only supported
* by the local file system and does not apply to a versioned file that is not checked-out.
* A versioned file that is not checked-out will always return false, while a
* {@link DomainFileProxy} will always return true.
* From a framework point of view a read-only file can never be changed.
* @return true if this file is marked read-only
*/
boolean isReadOnly() {
synchronized (fileSystem) {
return folderItem != null && folderItem.isReadOnly();
}
}
/**
* Return true if this is a versioned database, else false
* @return true if versioned
*/
boolean isVersioned() {
synchronized (fileSystem) {
if (versionedFolderItem == null) {
return isCheckedOut();
}
return !isHijacked();
}
}
/**
* Returns true if the file is versioned but a private copy also exists.
* @return true if hijacked
*/
boolean isHijacked() {
synchronized (fileSystem) {
return folderItem != null && versionedFolderItem != null && !folderItem.isCheckedOut();
}
}
/**
* Returns true if this file may be checked-out from the associated repository.
* User's with read-only repository access will not have checkout ability.
* @return true if can checkout
*/
boolean canCheckout() {
synchronized (fileSystem) {
try {
if (folderItem != null || fileSystem.isReadOnly() ||
versionedFileSystem.isReadOnly()) {
return false;
}
return !isLinkFile();
}
catch (IOException e) {
return false;
}
}
}
/**
* Returns true if this file may be checked-in to the associated repository.
* @return true if can check-in
*/
boolean canCheckin() {
synchronized (fileSystem) {
try {
return (!fileSystem.isReadOnly() && !versionedFileSystem.isReadOnly() &&
modifiedSinceCheckout());
}
catch (IOException e) {
return false;
}
}
}
/**
* Return either the latest version if the file is not checked-out or the version that
* was checked-out or a specific version that was requested.
* @return the version
*/
int getVersion() {
synchronized (fileSystem) {
try {
if (folderItem != null) {
if (folderItem.isCheckedOut()) {
return folderItem.getCheckoutVersion();
}
return folderItem.getCurrentVersion();
}
return versionedFolderItem.getCurrentVersion();
}
catch (IOException e) {
Msg.error(this, "IO error", e);
return -1;
}
}
}
	/**
	 * Get the latest version of the associated versioned item.
	 * NOTE: original javadoc incorrectly described a boolean result.
	 * @return the latest version number, or 0 if the file is not versioned or is hijacked
	 */
	int getLatestVersion() {
		synchronized (fileSystem) {
			if (!isHijacked() && versionedFolderItem != null) {
				return versionedFolderItem.getCurrentVersion();
			}
			return 0;
		}
	}
/**
* Returns true if this file can be merged with the current versioned file.
* @return true if can merge
*/
boolean canMerge() {
synchronized (fileSystem) {
try {
return (!fileSystem.isReadOnly() && versionedFolderItem != null &&
folderItem != null && folderItem.isCheckedOut() &&
(versionedFolderItem.getCurrentVersion() > folderItem.getCheckoutVersion()));
}
catch (IOException e) {
Msg.error(this, "IO Error", e);
}
return false;
}
}
	/**
	 * Sets the object to read-only.  This method may only be invoked
	 * for private files (i.e., not versioned).
	 * @param state if true file will be read-only and may not be updated, if false the
	 * file may be updated.
	 * @throws IOException if an IO error occurs.
	 */
	void setReadOnly(boolean state) throws IOException {
		synchronized (fileSystem) {
			if (fileSystem.isReadOnly()) {
				throw new ReadOnlyException("setReadOnly permitted in writeable project only");
			}
			if (isVersioned()) {
				// read-only flag applies to private (non-versioned) files only
				throw new AssertException("Versioned files do not support read-only setting");
			}
			folderItem.setReadOnly(state);
			statusChanged();
		}
	}
/**
* Returns list of all available versions.
* @return the versions
* @throws IOException if there is an exception getting the history
*/
Version[] getVersionHistory() throws IOException {
synchronized (fileSystem) {
if (versionedFolderItem != null) {
return versionedFolderItem.getVersions();
}
return null;
}
}
/**
* Perform neccessary check to ensure this file may be added to version control.
* @throws IOException if any checks fail or other IO error occurs
*/
void checkCanAddToRepository() throws IOException {
if (!versionedFileSystem.isOnline()) {
throw new NotConnectedException("Not connected to repository server");
}
if (fileSystem.isReadOnly() || versionedFileSystem.isReadOnly()) {
throw new ReadOnlyException(
"versioning permitted within writeable project and repository only");
}
if (folderItem == null) {
throw new FileNotFoundException("File not found");
}
if (folderItem.isCheckedOut() || versionedFolderItem != null) {
throw new IOException("File already versioned");
}
ContentHandler<?> contentHandler = getContentHandler();
if (contentHandler instanceof LinkHandler linkHandler) {
// must check local vs remote URL
if (!GhidraURL.isServerRepositoryURL(LinkHandler.getURL(folderItem))) {
throw new IOException("Local project link-file may not be versioned");
}
}
else if (contentHandler.isPrivateContentType()) {
throw new IOException("Content may not be versioned: " + getContentType());
}
}
	/**
	 * Adds this private file to version control.
	 * @param comment new version comment
	 * @param keepCheckedOut if true, the file will be initially checked-out
	 * @param monitor progress monitor
	 * @throws FileInUseException if this file is in-use.
	 * @throws IOException if an IO or access error occurs.  Also if file is not
	 * private.
	 * @throws CancelledException if the monitor cancelled the operation
	 */
	void addToVersionControl(String comment, boolean keepCheckedOut, TaskMonitor monitor)
			throws IOException, CancelledException {
		checkCanAddToRepository();
		if (busy.getAndSet(true)) {
			throw new FileInUseException(name + " is busy");
		}
		DomainObjectAdapterDB inUseDomainObj = null;
		projectData.mergeStarted();
		try {
			inUseDomainObj = getAndLockInUseDomainObjectForMergeUpdate("checkin");
			if (isLinkFile()) {
				// link-files are never retained as a checkout
				keepCheckedOut = false;
			}
			else if (inUseDomainObj != null && !keepCheckedOut) {
				// an open file must remain checked-out so the open object stays valid
				keepCheckedOut = true;
				Msg.warn(this, "File currently open - must keep checked-out: " + name);
			}
			synchronized (fileSystem) {
				String parentPath = parent.getPathname();
				String user = ClientUtil.getUserName();
				try {
					if (folderItem instanceof DatabaseItem) {
						DatabaseItem databaseItem = (DatabaseItem) folderItem;
						BufferFile bufferFile = databaseItem.open();
						try {
							versionedFolderItem = versionedFileSystem.createDatabase(parentPath,
								name, folderItem.getFileID(), bufferFile, comment,
								folderItem.getContentType(), false, monitor, user);
						}
						finally {
							bufferFile.dispose();
						}
					}
					else if (folderItem instanceof DataFileItem) {
						DataFileItem dataFileItem = (DataFileItem) folderItem;
						InputStream istream = dataFileItem.getInputStream();
						try {
							versionedFolderItem = versionedFileSystem.createDataFile(parentPath,
								name, istream, comment, folderItem.getContentType(), monitor);
						}
						finally {
							istream.close();
						}
					}
					else {
						throw new AssertException("Unknown folder item type");
					}
				}
				catch (InvalidNameException e) {
					throw new AssertException("Unexpected error", e);
				}
				if (keepCheckedOut) {
					// Maintain exclusive checkout if private repository or file is open for update
					boolean exclusive = !versionedFileSystem.isShared() || (inUseDomainObj != null);
					ProjectLocator projectLocator = parent.getProjectLocator();
					CheckoutType checkoutType;
					if (projectLocator.isTransient()) {
						checkoutType = CheckoutType.TRANSIENT;
						exclusive = true;
					}
					else {
						// All checkouts for non-shared versioning are treated as exclusive
						checkoutType =
							(exclusive || !versionedFileSystem.isShared()) ? CheckoutType.EXCLUSIVE
									: CheckoutType.NORMAL;
					}
					ItemCheckoutStatus checkout = versionedFolderItem.checkout(checkoutType, user,
						ItemCheckoutStatus.getProjectPath(projectLocator.toString(),
							projectLocator.isTransient()));
					folderItem.setCheckout(checkout.getCheckoutId(), exclusive,
						checkout.getCheckoutVersion(), folderItem.getCurrentVersion());
				}
				else {
					// NOTE: file open read-only may prevent removal and result in hijack
					try {
						folderItem.delete(-1, ClientUtil.getUserName());
						folderItem = null;
					}
					catch (FileInUseException e1) {
						// Ignore - should result in Hijacked file
					}
				}
				if (inUseDomainObj != null) {
					// redirect the open domain object's database to the retained local file
					getContentHandler().resetDBSourceFile(folderItem, inUseDomainObj);
				}
			} // end of synchronized block
			if (inUseDomainObj != null) {
				inUseDomainObj.invalidate();
			}
		}
		finally {
			unlockDomainObject(inUseDomainObj);
			busy.set(false);
			projectData.mergeEnded();
			parent.deleteLocalFolderIfEmpty();
			parent.fileChanged(name);
		}
	}
	/**
	 * Checkout this file for update.  If this file is already
	 * private, this method does nothing.
	 * @param exclusive if true an exclusive checkout will be requested
	 * @param monitor progress monitor
	 * @return true if checkout successful, false if an exclusive checkout was not possible
	 * due to other users having checkouts of this file.  A request for a non-exclusive checkout
	 * will never return false.
	 * @throws IOException if an IO or access error occurs.
	 * @throws CancelledException if task monitor cancelled operation.
	 */
	boolean checkout(boolean exclusive, TaskMonitor monitor)
			throws IOException, CancelledException {
		if (fileSystem.isReadOnly()) {
			throw new ReadOnlyException("checkout permitted in writeable project only");
		}
		if (versionedFileSystem.isReadOnly()) {
			throw new ReadOnlyException(
				"Versioned repository is read-only and does not permit checkout");
		}
		synchronized (fileSystem) {
			if (folderItem != null) {
				throw new AssertException("Cannot checkout, private file exists");
			}
			if (!versionedFileSystem.isOnline()) {
				throw new NotConnectedException("Not connected to repository server");
			}
			if (isLinkFile()) {
				// link-files may not be checked-out
				return false;
			}
			String user = ClientUtil.getUserName();
			ProjectLocator projectLocator = parent.getProjectLocator();
			CheckoutType checkoutType;
			if (projectLocator.isTransient()) {
				checkoutType = CheckoutType.TRANSIENT;
				exclusive = true;
			}
			else {
				// All checkouts for non-shared versioning are treated as exclusive
				checkoutType =
					(exclusive || !versionedFileSystem.isShared()) ? CheckoutType.EXCLUSIVE
							: CheckoutType.NORMAL;
			}
			ItemCheckoutStatus checkout =
				versionedFolderItem.checkout(checkoutType, user, ItemCheckoutStatus
						.getProjectPath(projectLocator.toString(), projectLocator.isTransient()));
			if (checkout == null) {
				// exclusive checkout refused (other checkouts exist)
				return false;
			}
			// FileID may be established during an exclusive checkout
			boolean fileIDset = false;
			if (fileID == null) {
				fileID = versionedFolderItem.getFileID();
				fileIDset = (fileID != null);
			}
			int checkoutVersion = checkout.getCheckoutVersion();
			String parentPath = parent.getPathname();
			try {
				if (versionedFolderItem instanceof DatabaseItem) {
					DatabaseItem databaseItem = (DatabaseItem) versionedFolderItem;
					BufferFile bufferFile = databaseItem.open(checkoutVersion);
					try {
						folderItem = fileSystem.createDatabase(parentPath, name, fileID, bufferFile,
							null, databaseItem.getContentType(), false, monitor, user);
					}
					finally {
						bufferFile.dispose();
					}
				}
				else if (versionedFolderItem instanceof DataFileItem) {
					DataFileItem dataFileItem = (DataFileItem) versionedFolderItem;
					InputStream istream = dataFileItem.getInputStream(checkoutVersion);
					try {
						folderItem = fileSystem.createDataFile(parentPath, name, istream, null,
							dataFileItem.getContentType(), monitor);
					}
					finally {
						istream.close();
					}
				}
				else {
					throw new AssertException("Can't checkout - unknown file type");
				}
			}
			catch (InvalidNameException e) {
				throw new AssertException("Unexpected error", e);
			}
			finally {
				if (folderItem == null) {
					// local copy was not created - release the repository checkout
					versionedFolderItem.terminateCheckout(checkout.getCheckoutId(), false);
				}
			}
			folderItem.setCheckout(checkout.getCheckoutId(), exclusive, checkoutVersion,
				folderItem.getCurrentVersion());
			statusChanged(fileIDset);
		}
		return true;
	}
	/**
	 * Attempt a fast checkin which transfers the modified database content as a new
	 * version without a merge.  Only possible for database items whose checkout version
	 * still matches the latest repository version.
	 * @param checkinHandler provides user input data to complete the checkin
	 * @param monitor task monitor
	 * @return true if the quick checkin was performed, false if a merge-based checkin
	 * is required
	 * @throws IOException if an IO or access error occurs
	 * @throws CancelledException if task monitor cancelled operation
	 */
	private boolean quickCheckin(CheckinHandler checkinHandler, TaskMonitor monitor)
			throws IOException, CancelledException {
		if (!(versionedFolderItem instanceof DatabaseItem)) {
			return false;
		}
		monitor.checkCancelled();
		monitor.setMessage("Initiating Check In for " + name);
		boolean success = false;
		LocalManagedBufferFile srcFile = null;
		ManagedBufferFile checkinFile = null;
		try {
			synchronized (fileSystem) {
				// Make sure version does not change by opening for update before checking versions
				checkinFile =
					((DatabaseItem) versionedFolderItem).openForUpdate(folderItem.getCheckoutId());
				if (versionedFolderItem.getCurrentVersion() != folderItem.getCheckoutVersion()) {
					// a newer repository version exists - merge required
					return false;
				}
				// TODO: assumes folderItem is local - should probably defer createNewVersion
				// to folderItem if possible (requires refactor)
				srcFile = (LocalManagedBufferFile) ((DatabaseItem) folderItem).open();
			}
			String comment = checkinHandler.getComment();
			if (checkinHandler.createKeepFile()) {
				DomainObject sourceObj = null;
				try {
					sourceObj = getContentHandler().getImmutableObject(folderItem, this,
						DomainFile.DEFAULT_VERSION, -1, monitor);
					createKeepFile(sourceObj, monitor);
				}
				catch (VersionException e) {
					// ignore - unable to create keep file
				}
				finally {
					release(sourceObj);
				}
			}
			monitor.checkCancelled();
			synchronized (fileSystem) {
				srcFile.createNewVersion(checkinFile, comment, monitor);
				success = true;
			}
		}
		finally {
			// always close both buffer files, even on failure/cancel
			if (checkinFile != null) {
				checkinFile.close();
			}
			if (srcFile != null) {
				srcFile.close();
			}
		}
		return success;
	}
/**
* Verify checkout status and that current user is the checkout user for this file
* @param operationName name of user case (e.g., checkin)
* @throws IOException if server/repository will not permit current user to checkin,
* or update checkout version of current file. (i.e., server login does not match
* user name used at time of initial checkout)
*/
private void verifyCheckout(String operationName) throws IOException {
if (versionedFileSystem instanceof LocalFileSystem) {
return; // rely on local project ownership
}
String repoUserName = versionedFileSystem.getUserName();
if (repoUserName == null) {
throw new IOException("File " + operationName + " not permitted (not connected)");
}
ItemCheckoutStatus checkoutStatus = getCheckoutStatus();
if (checkoutStatus == null) {
throw new IOException("File not checked out");
}
String checkoutUserName = checkoutStatus.getUser();
if (!repoUserName.equals(checkoutUserName)) {
throw new IOException("File " + operationName + " not permitted - checkout user '" +
checkoutUserName + "' differs from repository user '" + repoUserName + "'");
}
}
	/**
	 * Performs check in to associated repository.  File must be checked-out
	 * and modified since checkout.
	 * @param checkinHandler provides user input data to complete checkin process.
	 * @param monitor the TaskMonitor.
	 * @throws IOException if an IO or access error occurs
	 * @throws VersionException if unable to handle domain object version in versioned filesystem.
	 * We are unable to upgrade since this would only occur if checkout is not exclusive.
	 * @throws CancelledException if task monitor cancelled operation
	 */
	void checkin(CheckinHandler checkinHandler, TaskMonitor monitor)
			throws IOException, VersionException, CancelledException {
		if (!versionedFileSystem.isOnline()) {
			throw new NotConnectedException("Not connected to repository server");
		}
		if (fileSystem.isReadOnly() || versionedFileSystem.isReadOnly()) {
			throw new ReadOnlyException(
				"checkin permitted within writeable project and repository only");
		}
		if (!isCheckedOut()) {
			throw new IOException("File not checked out");
		}
		if (isChanged()) {
			// an open object must be saved before checkin
			throw new IOException("File has unsaved changes");
		}
		if (canRecover()) {
			// outstanding crash-recovery data must be resolved first
			throw new IOException("File recovery data exists");
		}
		if (!modifiedSinceCheckout()) {
			throw new IOException("File has not been modified since checkout");
		}
		verifyCheckout("checkin");
		if (monitor == null) {
			monitor = TaskMonitor.DUMMY;
		}
		if (busy.getAndSet(true)) {
			throw new FileInUseException(name + " is busy");
		}
		DomainObjectAdapterDB inUseDomainObj = null;
		projectData.mergeStarted();
		try {
			ContentHandler<?> contentHandler = getContentHandler();
			inUseDomainObj = getAndLockInUseDomainObjectForMergeUpdate("checkin");
			boolean keepCheckedOut = checkinHandler.keepCheckedOut();
			if (inUseDomainObj != null && !keepCheckedOut) {
				// an open file must remain checked-out so the open object stays valid
				keepCheckedOut = true;
				Msg.warn(this, "File currently open - must keep checked-out: " + name);
			}
			boolean quickCheckin = ALWAYS_MERGE ? false : quickCheckin(checkinHandler, monitor);
			if (!quickCheckin) {
				// merge-based checkin required (interactive - not supported headless)
				if (SystemUtilities.isInHeadlessMode()) {
					throw new IOException(
						"Checkin failed, file requires merge which is not supported in headless mode");
				}
				Msg.info(this, "Checkin with merge for " + name);
				DomainObjectAdapter checkinObj = contentHandler.getDomainObject(versionedFolderItem,
					null, folderItem.getCheckoutId(), false, false, this, monitor);
				checkinObj.setDomainFile(new DomainFileProxy(name, getParent().getPathname(),
					checkinObj, versionedFolderItem.getCurrentVersion() + 1, fileID,
					parent.getProjectLocator()));
				DomainObject sourceObj = null;
				DomainObject originalObj = null;
				DomainObject latestObj = null;
				try {
					synchronized (fileSystem) {
						int coVer = folderItem.getCheckoutVersion();
						// source: local modified copy; original: checked-out version;
						// latest: current repository version
						sourceObj = contentHandler.getImmutableObject(folderItem, this,
							DomainFile.DEFAULT_VERSION, -1, monitor);
						originalObj = contentHandler.getImmutableObject(versionedFolderItem, this,
							coVer, -1, monitor);
						latestObj = contentHandler.getImmutableObject(versionedFolderItem, this,
							DomainFile.DEFAULT_VERSION, coVer, monitor);
					}
					DomainObjectMergeManager mergeMgr = contentHandler.getMergeManager(checkinObj,
						sourceObj, originalObj, latestObj);
					if (!mergeMgr.merge(monitor)) {
						Msg.info(this, "Checkin with merge terminated for " + name);
						return; // error displayed by merge manager
					}
					checkinObj.save(checkinHandler.getComment(), monitor);
					if (checkinHandler.createKeepFile()) {
						if (monitor != null) {
							monitor.setMessage("Generating local keep file...");
						}
						createKeepFile(sourceObj, monitor);
					}
				}
				finally {
					// release all temporary objects regardless of outcome
					checkinObj.release(this);
					release(sourceObj);
					release(originalObj);
					release(latestObj);
				}
			}
			synchronized (fileSystem) {
				// refresh versioned item to pick up the newly created version
				versionedFolderItem = versionedFileSystem.getItem(parent.getPathname(), name);
				if (versionedFolderItem == null) {
					throw new IOException("Checkin failed, versioned item not found");
				}
				Msg.info(this, "Checkin completed for " + name);
				if (keepCheckedOut) {
					boolean success = false;
					try {
						if (monitor != null) {
							monitor.setMessage("Updating local checkout file...");
						}
						folderItem.updateCheckout(versionedFolderItem, !quickCheckin, monitor);
						success = true;
					}
					finally {
						if (!success) {
							// Failed to update checkout for unknown reason
							try {
								if (inUseDomainObj != null) {
									// On error disassociate open domain object from this file
									projectData.clearDomainObject(getPathname());
									// An invalid version (-2) is specified to avoid file match
									inUseDomainObj.setDomainFile(new DomainFileProxy(name,
										parent.getPathname(), inUseDomainObj, -2, fileID,
										parent.getProjectLocator()));
									inUseDomainObj.setTemporary(true);
								}
								undoCheckout(false, true);
							}
							catch (IOException e) {
								Msg.error(this, "Undo checkout error", e);
							}
						}
					}
				}
				else {
					undoCheckout(false, true);
				}
				if (inUseDomainObj != null) {
					// redirect the open domain object's database to the updated local file
					contentHandler.resetDBSourceFile(folderItem, inUseDomainObj);
				}
			} // end of synchronized block
			if (inUseDomainObj != null) {
				inUseDomainObj.invalidate();
			}
		}
		finally {
			unlockDomainObject(inUseDomainObj);
			busy.set(false);
			projectData.mergeEnded();
			parent.deleteLocalFolderIfEmpty();
			parent.fileChanged(name);
		}
	}
private void release(DomainObject domainObj) {
if (domainObj != null) {
domainObj.release(this);
}
}
private void unlockDomainObject(DomainObjectAdapterDB lockedDomainObject) {
try {
if (lockedDomainObject != null) {
lockedDomainObject.unlock();
}
}
catch (Exception e) {
Msg.error(this, "Unexpected " + getContentType() + " lock error: " + getName());
}
}
	/**
	 * If this file's domain object is currently open, verify that it can safely remain
	 * open across a merge update and acquire its lock.
	 * @param operation name of the operation requesting the lock (e.g., "checkin")
	 * @return the locked open domain object, or null if the file is not open
	 * @throws FileInUseException if the open object cannot support a merge update
	 * @throws IOException if an IO error occurs
	 */
	private DomainObjectAdapterDB getAndLockInUseDomainObjectForMergeUpdate(String operation)
			throws IOException {
		DomainObjectAdapterDB inUseDomainObj;
		synchronized (fileSystem) {
			DomainObjectAdapter domainObj = getOpenedDomainObject();
			if (domainObj == null) {
				return null;
			}
			// If we proceed with file in-use it must be instance of DomainObjectAdapterDB
			if (!(domainObj instanceof DomainObjectAdapterDB)) {
				throw new FileInUseException(name + " is in use");
			}
			inUseDomainObj = (DomainObjectAdapterDB) domainObj;
			if (inUseDomainObj.isChanged()) {
				throw new FileInUseException(name + " is in use w/ unsaved changes");
			}
		}
		// Ensure that existing domain object will support DB merge update and can be locked
		ContentHandler<?> contentHandler = getContentHandler();
		if (!contentHandler.canResetDBSourceFile() || !inUseDomainObj.lock(operation) ||
			inUseDomainObj.getDBHandle().hasUncommittedChanges()) {
			throw new FileInUseException(name + " is in use");
		}
		return inUseDomainObj;
	}
/**
* Get checkout status associated with a versioned file.
* @return checkout status or null if not checked-out to current associated project.
* @throws IOException if an IO or access error occurs
*/
ItemCheckoutStatus getCheckoutStatus() throws IOException {
synchronized (fileSystem) {
if (!versionedFileSystem.isOnline()) {
throw new NotConnectedException("Not connected to repository server");
}
if (versionedFolderItem == null) {
throw new IOException("File is not versioned");
}
ItemCheckoutStatus status = null;
if (folderItem != null) {
long checkoutId = folderItem.getCheckoutId();
if (checkoutId >= 0) {
status = versionedFolderItem.getCheckout(checkoutId);
}
}
return status;
}
}
	/**
	 * Get a list of checkouts by all users for the associated versioned file.
	 * @return list of checkouts
	 * @throws NotConnectedException if not connected to the repository server
	 * @throws IOException if an IO or access error occurs, or file is not versioned
	 */
	ItemCheckoutStatus[] getCheckouts() throws IOException {
		synchronized (fileSystem) {
			if (!versionedFileSystem.isOnline()) {
				throw new NotConnectedException("Not connected to repository server");
			}
			if (versionedFolderItem == null) {
				throw new IOException("File is not versioned");
			}
			return versionedFolderItem.getCheckouts();
		}
	}
	/**
	 * Forcefully terminate a checkout for the associated versioned file.
	 * The user must be the owner of the checkout or have administrator privilege
	 * on the versioned filesystem (i.e., repository).
	 * @param checkoutId checkout ID
	 * @throws NotConnectedException if not connected to the repository server
	 * @throws IOException if an IO or access error occurs, or file is not versioned
	 */
	void terminateCheckout(long checkoutId) throws IOException {
		synchronized (fileSystem) {
			if (!versionedFileSystem.isOnline()) {
				throw new NotConnectedException("Not connected to repository server");
			}
			if (versionedFolderItem == null) {
				throw new IOException("File is not versioned");
			}
			// true -> notify listeners of the terminated checkout
			versionedFolderItem.terminateCheckout(checkoutId, true);
		}
	}
	/**
	 * Undo "checked-out" file.  The original repository file is restored.
	 * Convenience overload which does not force the operation when offline.
	 * @param keep if true, the private database will be renamed with a .keep
	 * extension.
	 * @param inUseOK true if a busy/in-use file state may be ignored, else false
	 * @throws NotConnectedException if shared project and not connected to repository
	 * @throws FileInUseException if this file is in-use (when {@code inUseOK} == false).
	 * @throws IOException if file is not checked-out or an IO / access error occurs.
	 */
	void undoCheckout(boolean keep, boolean inUseOK) throws IOException {
		undoCheckout(keep, false, inUseOK);
	}
	/**
	 * Undo "checked-out" file.  The original repository file is restored.
	 * @param keep if true, the private database will be renamed with a .keep
	 * extension.
	 * @param force true if operation may proceed even when not connected to the versioned
	 * file-system.
	 * @param inUseOK true if a busy/in-use file state may be ignored, else false
	 * @throws NotConnectedException if shared project and not connected to repository
	 * @throws FileInUseException if this file is in-use (when {@code inUseOK} == false).
	 * @throws IOException if file is not checked-out or an IO / access error occurs.
	 */
	void undoCheckout(boolean keep, boolean force, boolean inUseOK) throws IOException {
		synchronized (fileSystem) {
			if (fileSystem.isReadOnly()) {
				throw new ReadOnlyException("undoCheckout permitted within writeable project only");
			}
			if (!inUseOK) {
				checkInUse();
			}
			boolean doForce = false;
			boolean isOnline = versionedFileSystem.isOnline();
			if (!isOnline) {
				if (!force) {
					throw new NotConnectedException("Not connected to repository server");
				}
				// offline: skip repository-side checkout termination below
				doForce = true;
			}
			if (!isCheckedOut()) {
				throw new IOException("File not checked out");
			}
			if (!doForce) {
				verifyCheckout("undo-checkout");
				long checkoutId = folderItem.getCheckoutId();
				versionedFolderItem.terminateCheckout(checkoutId, true);
			}
			String keepName = getKeepName();
			if (keep) {
				folderItem.clearCheckout();
				try {
					// generate new local keep file
					String folderPath = parent.getPathname();
					fileSystem.moveItem(folderPath, name, folderPath, keepName);
					parent.fileChanged(keepName);
				}
				catch (InvalidNameException e) {
					throw new AssertException("Unexpected error", e);
				}
			}
			else {
				// discard the local copy entirely
				folderItem.delete(-1, ClientUtil.getUserName());
				parent.deleteLocalFolderIfEmpty();
			}
			folderItem = null;
			parent.fileChanged(name);
		}
	}
private String getKeepName() {
String tempName = name + ".keep";
String keep = tempName;
int cnt = 0;
while (fileSystem.fileExists(parent.getPathname(), keep) || versionedFileExists(keep)) {
keep = tempName + "." + (++cnt);
}
return keep;
}
private boolean versionedFileExists(String fileName) {
try {
return (versionedFileSystem.isOnline() &&
versionedFileSystem.getItem(parent.getPathname(), fileName) != null);
}
catch (IOException e) {
// ignore
}
return false;
}
	/**
	 * Create a local ".keep" file capturing the content of the specified domain object.
	 * Used to preserve a private copy before it is removed or superseded.  Failures
	 * are logged but not propagated.
	 * @param sourceObj object whose content should be preserved
	 * @param monitor task monitor
	 */
	private void createKeepFile(DomainObject sourceObj, TaskMonitor monitor) {
		String keepName = name + ".keep";
		try {
			GhidraFileData keepFileData = parent.getFileData(keepName, false);
			if (keepFileData != null) {
				// remove the stale keep file before creating a new one
				try {
					keepFileData.delete();
				}
				catch (IOException e) {
					Msg.error(this,
						"Failed to create keep file: failed to remove old keep file: " + keepName,
						e);
					return;
				}
			}
			keepName = getKeepName();
			Msg.info(this, "Creating old version keep file: " + keepName);
			parent.createFile(keepName, sourceObj, monitor);
		}
		catch (InvalidNameException e) {
			throw new AssertException("Unexpected error", e);
		}
		catch (CancelledException e) {
			// ignore
		}
		catch (IOException e) {
			Msg.error(this, "Failed to create keep file: " + keepName, e);
		}
	}
	/**
	 * Delete the entire database for this file, including any version files.
	 * @throws FileInUseException if this file is in-use / checked-out.
	 * @throws UserAccessException if the user does not have permission to delete the file.
	 * @throws IOException if an IO or access error occurs.
	 */
	void delete() throws IOException {
		synchronized (fileSystem) {
			if (fileSystem.isReadOnly()) {
				throw new ReadOnlyException("delete permitted within writeable project only");
			}
			checkInUse();
			if (folderItem != null && folderItem.isCheckedOut()) {
				throw new FileInUseException("Can not delete file while it is checked-out");
			}
			if (isHijacked()) {
				// remove only the local item - the versioned item becomes visible again
				folderItem.delete(-1, ClientUtil.getUserName());
				parent.deleteLocalFolderIfEmpty();
				Msg.info(this, "Deleted local file, revealing hijacked file " + name);
			}
			else if (versionedFolderItem == null) {
				folderItem.delete(-1, ClientUtil.getUserName());
				Msg.info(this, "Deleted local file " + name);
			}
			else {
				versionedFolderItem.delete(-1, ClientUtil.getUserName());
				Msg.info(this, "Deleted versioned file " + name);
			}
			// remove associated user data unless a versioned item with the same file-ID remains
			if (fileID != null && (folderItem == null || versionedFolderItem == null ||
				!fileID.equals(versionedFolderItem.getFileID()))) {
				removeAssociatedUserDataFile();
			}
			parent.fileChanged(name);
		}
	}
/**
 * Deletes a specific version of a file from the versioned filesystem.
 * @param version specific version to be deleted. The version must either
 * be the oldest or latest, or -1 which will attempt to remove all versions.
 * When deleting the latest version, this method could take a long time
 * to return since the previous version must be reconstructed within the
 * versioned filesystem.
 * @throws IOException if an IO error occurs, including the inability
 * to delete a version because this item is checked-out, the user does
 * not have permission, or the specified version is not the oldest or
 * latest.
 */
void delete(int version) throws IOException {
	synchronized (fileSystem) {
		if (fileSystem.isReadOnly()) {
			throw new ReadOnlyException(
				"delete(version) permitted within writeable project only");
		}
		if (versionedFolderItem == null) {
			throw new IOException(name + " is not versioned");
		}
		// Refuse to delete the version this file's checkout is based upon
		if (folderItem != null && folderItem.getCheckoutVersion() == version) {
			throw new FileInUseException(name + " version " + version + " is checked-out");
		}
		versionedFolderItem.delete(version, ClientUtil.getUserName());
	}
}
/**
 * Remove the user-data file associated with this file's content, if its content
 * handler supports user data. This is best-effort cleanup: all failures, including
 * a missing content handler, are intentionally ignored.
 */
private void removeAssociatedUserDataFile() {
	try {
		ContentHandler<?> contentHandler = getContentHandler();
		if (contentHandler instanceof DBWithUserDataContentHandler) {
			// Prefer the private item; fall back to the versioned item
			FolderItem item = folderItem != null ? folderItem : versionedFolderItem;
			((DBWithUserDataContentHandler<?>) contentHandler).removeUserDataFile(item,
				parent.getUserFileSystem());
		}
	}
	catch (Exception e) {
		// best-effort: ignore missing content handler or removal failure
	}
}
/**
 * Performs merge from current version of versioned file into local checked-out file.
 * Requires an online connection, a checked-out file with no unsaved changes and no
 * recovery data, and a remote version newer than the checkout version.
 * @param okToUpgrade if true an upgrade will be performed if needed
 * @param monitor task monitor (may be null, in which case a dummy monitor is used)
 * @throws IOException if an IO or access error occurs
 * @throws VersionException if unable to handle domain object version in versioned filesystem.
 * If okToUpgrade was false, check exception to see if it can be upgraded
 * @throws CancelledException if task monitor cancelled operation
 */
void merge(boolean okToUpgrade, TaskMonitor monitor)
		throws IOException, VersionException, CancelledException {
	// --- Precondition guards (all throw before any state is modified) ---
	if (fileSystem.isReadOnly()) {
		throw new ReadOnlyException("merge permitted within writeable project only");
	}
	if (parent.getProjectLocator().isTransient()) {
		throw new IOException("Merge not permitted for transient project");
	}
	if (!versionedFileSystem.isOnline()) {
		throw new NotConnectedException("Not connected to repository server");
	}
	if (!isCheckedOut()) {
		throw new IOException("File not checked out");
	}
	if (!(versionedFolderItem instanceof DatabaseItem)) {
		throw new IOException("unsupported operation");
	}
	if (folderItem.getCheckoutVersion() == versionedFolderItem.getCurrentVersion()) {
		throw new IOException("Versioned file has not been updated since checkout");
	}
	if (isChanged()) {
		throw new IOException("File has unsaved changes");
	}
	if (canRecover()) {
		throw new IOException("File recovery data exists");
	}
	verifyCheckout("merge");
	if (monitor == null) {
		monitor = TaskMonitor.DUMMY;
	}
	// Guard against concurrent operations on this file
	if (busy.getAndSet(true)) {
		throw new FileInUseException(name + " is busy");
	}

	FolderItem tmpItem = null;
	DomainObjectAdapterDB inUseDomainObj = null;
	projectData.mergeStarted();
	try {
		inUseDomainObj = getAndLockInUseDomainObjectForMergeUpdate("merge");

		if (!modifiedSinceCheckout()) {
			// Quick merge: local file unmodified, simply refresh the checkout
			folderItem.updateCheckout(versionedFolderItem, true, monitor);
		}
		else {
			// Full 4-way merge (mergeObj/source/original/latest) requires GUI support
			if (SystemUtilities.isInHeadlessMode()) {
				throw new IOException("Merge failed, merge is not supported in headless mode");
			}

			ContentHandler<?> contentHandler = getContentHandler();

			// Test versioned file for VersionException
			int mergeVer = versionedFolderItem.getCurrentVersion();
			if (!okToUpgrade) {
				// verify remote version can be opened without version error
				DomainObject testObj = contentHandler.getReadOnlyObject(versionedFolderItem,
					mergeVer, false, this, monitor);
				testObj.release(this);
			}

			Msg.info(this, "Merging version " + mergeVer + " for " + name);

			// Copy current versioned item to temporary private item
			DatabaseItem databaseItem = (DatabaseItem) versionedFolderItem;
			BufferFile bufferFile = databaseItem.open(mergeVer);
			try {
				String tmpName = name + ".merge";
				tmpItem = fileSystem.createTemporaryDatabase(parent.getPathname(), tmpName,
					databaseItem.getFileID(), bufferFile, databaseItem.getContentType(), false,
					monitor);
			}
			catch (InvalidNameException e) {
				throw new AssertException("Unexpected error", e);
			}
			finally {
				bufferFile.dispose();
			}

			// Mirror this file's checkout state onto the temporary item so the merge
			// result can later replace the local checkout
			int coVer = folderItem.getCheckoutVersion();
			long checkoutId = folderItem.getCheckoutId();
			tmpItem.setCheckout(checkoutId, folderItem.isCheckedOutExclusive(), mergeVer, 0);

			DomainObject mergeObj = contentHandler.getDomainObject(tmpItem, null, -1,
				okToUpgrade, false, this, monitor);
			DomainObject sourceObj = null;
			DomainObject originalObj = null;
			DomainObject latestObj = null; // TODO: Is there some way to leverage the buffer file we already copied into tmpItem? Missing required change set
			try {
				// source = local checked-out content; original = checkout base version;
				// latest = current remote version (with change set relative to base)
				sourceObj = contentHandler.getImmutableObject(folderItem, this,
					DomainFile.DEFAULT_VERSION, -1, monitor);
				originalObj = contentHandler.getImmutableObject(versionedFolderItem, this,
					coVer, -1, monitor);
				latestObj = contentHandler.getImmutableObject(versionedFolderItem, this,
					mergeVer, coVer, monitor);

				DomainObjectMergeManager mergeMgr =
					contentHandler.getMergeManager(mergeObj, sourceObj, originalObj, latestObj);

				if (!mergeMgr.merge(monitor)) {
					Msg.info(this, "Merge terminated for " + name);
					return; // error displayed by merge manager
				}

				mergeObj.save("Merge with version " + mergeVer, monitor);
				// Preserve the pre-merge local content in a ".keep" file
				createKeepFile(sourceObj, monitor);
			}
			finally {
				release(mergeObj);
				release(sourceObj);
				release(originalObj);
				release(latestObj);
			}

			// Update folder item: adopt merged content and advance checkout version
			folderItem.updateCheckout(tmpItem, mergeVer);
			versionedFolderItem.updateCheckoutVersion(checkoutId, mergeVer,
				ClientUtil.getUserName());
			tmpItem = null; // ownership transferred; suppress cleanup below
			Msg.info(this, "Merge completed for " + name);

			if (inUseDomainObj != null) {
				contentHandler.resetDBSourceFile(folderItem, inUseDomainObj);
			}
		}

		if (inUseDomainObj != null) {
			// Force any open domain object to refresh from the merged content
			inUseDomainObj.invalidate();
		}
	}
	finally {
		unlockDomainObject(inUseDomainObj);
		busy.set(false);
		try {
			// Clean up the temporary merge item if the merge did not complete
			if (tmpItem != null) {
				try {
					tmpItem.delete(-1, ClientUtil.getUserName());
				}
				catch (IOException e) {
					Msg.error(this, "IO error", e);
				}
			}
			parent.fileChanged(name);
			if (parent.visited()) {
				parent.refresh(false, true, null);
			}
		}
		finally {
			projectData.mergeEnded();
		}
	}
}
/**
 * Move this file into the newParent folder.
 * @param newParent new parent folder within the same project
 * @return the newly relocated domain file (the original DomainFile object becomes invalid since it is immutable)
 * @throws DuplicateFileException if a file with the same name
 * already exists in newParent folder.
 * @throws FileInUseException if this file is in-use / checked-out.
 * @throws IOException if an IO or access error occurs.
 */
GhidraFile moveTo(GhidraFolderData newParent) throws IOException {
	synchronized (fileSystem) {
		if (newParent.getLocalFileSystem() != fileSystem) {
			throw new IllegalArgumentException("moveTo permitted within same project only");
		}
		if (fileSystem.isReadOnly()) {
			throw new ReadOnlyException("moveTo permitted within writeable project only");
		}
		if (getParent().getPathname().equals(newParent.getPathname())) {
			throw new IllegalArgumentException("newParent must differ from current parent");
		}
		checkInUse();

		GhidraFolderData oldParent = parent;
		String oldName = name;
		// newParent may assign an altered name to avoid a collision
		String newName = newParent.getTargetName(name);
		try {
			if (isHijacked()) {
				// Move only the private (hijack) copy; the versioned file stays put
				fileSystem.moveItem(parent.getPathname(), name, newParent.getPathname(),
					newName);
				parent.fileChanged(name);
				newParent.fileChanged(newName);
				return newParent.getDomainFile(newName);
			}
			else if (versionedFolderItem == null) {
				// Private-only file: move it unless checked-out
				if (!isCheckedOut()) {
					fileSystem.moveItem(parent.getPathname(), name, newParent.getPathname(),
						newName);
					folderItem = fileSystem.getItem(newParent.getPathname(), newName);
				}
				else {
					throw new FileInUseException(name + " is checked-out");
				}
			}
			else {
				// Versioned file: move within the versioned filesystem
				versionedFileSystem.moveItem(parent.getPathname(), name,
					newParent.getPathname(), newName);
				versionedFolderItem =
					versionedFileSystem.getItem(newParent.getPathname(), newName);
			}
		}
		catch (InvalidNameException e) {
			throw new AssertException("Unexpected error", e);
		}

		// Update this object's identity and notify both folders of the move
		parent = newParent;
		name = newName;

		oldParent.fileMoved(newParent, oldName, newName);

		return newParent.getDomainFile(newName);
	}
}
/**
 * Determine if this file is a link file which corresponds to either a file or folder link.
 * The {@link DomainObject} referenced by a link-file may be opened using
 * {@link #getReadOnlyDomainObject(Object, int, TaskMonitor)}. The
 * {@link #getDomainObject(Object, boolean, boolean, TaskMonitor)} method may also be used
 * to obtain a read-only instance. {@link #getImmutableDomainObject(Object, int, TaskMonitor)}
 * use is not supported.
 * The URL stored within the link-file may be read using {@link #getLinkFileURL()}.
 * The content type (see {@link #getContentType()} of a link file will differ from that of the
 * linked object (e.g., "LinkedProgram" vs "Program").
 * @return true if link file else false for a normal domain file
 */
boolean isLinkFile() {
	synchronized (fileSystem) {
		try {
			Class<?> handlerClass = getContentHandler().getClass();
			return LinkHandler.class.isAssignableFrom(handlerClass);
		}
		catch (IOException e) {
			// no content handler available - cannot be a link file
			return false;
		}
	}
}
/**
 * Get URL associated with a link-file. The URL returned may reference either a folder
 * or a file within another project/repository.
 * @return link-file URL or null if not a link-file
 * @throws IOException if an IO error occurs
 */
URL getLinkFileURL() throws IOException {
	if (!isLinkFile()) {
		return null;
	}
	// Prefer the private item; fall back to the versioned item
	FolderItem item = versionedFolderItem;
	if (folderItem != null) {
		item = folderItem;
	}
	return LinkHandler.getURL(item);
}
/**
 * Determine if this file's content type supports linking.
 * @return true if linking is supported allowing a link-file to be created which
 * references this file, else false.
 */
boolean isLinkingSupported() {
	synchronized (fileSystem) {
		try {
			LinkHandler<?> linkHandler = getContentHandler().getLinkHandler();
			return linkHandler != null;
		}
		catch (IOException e) {
			// no content handler - linking unsupported
			return false;
		}
	}
}
/**
 * Copy this file into the newParent folder as a link file. Restrictions:
 * <ul>
 * <li>Specified newParent must reside within a different project since internal linking is
 * not currently supported. </li>
 * <li>Content type must support linking (see {@link #isLinkingSupported()}).</li>
 * </ul>
 * If this file is associated with a temporary transient project (i.e., not a locally
 * managed project) the generated link will refer to the remote file with a remote
 * Ghidra URL, otherwise a local project storage path will be used.
 * @param newParent new parent folder
 * @return newly created domain file or null if content type does not support link use.
 * @throws IOException if an IO or access error occurs.
 */
DomainFile copyToAsLink(GhidraFolderData newParent) throws IOException {
	synchronized (fileSystem) {
		LinkHandler<?> linkHandler = getContentHandler().getLinkHandler();
		if (linkHandler == null) {
			return null; // content type does not support linking
		}
		return newParent.copyAsLink(projectData, getPathname(), name, linkHandler);
	}
}
/**
 * Copy this file into the newParent folder as a private file.
 * @param newParent new parent folder
 * @param monitor task monitor
 * @return newly created domain file
 * @throws FileInUseException if this file is in-use / checked-out.
 * @throws IOException if an IO or access error occurs.
 * @throws CancelledException if task monitor cancelled operation.
 */
GhidraFile copyTo(GhidraFolderData newParent, TaskMonitor monitor)
		throws IOException, CancelledException {
	synchronized (fileSystem) {
		if (newParent.getLocalFileSystem().isReadOnly()) {
			// Fixed message: previously said "copyVersionTo" (copy-paste error)
			throw new ReadOnlyException("copyTo permitted to writeable project only");
		}
		// Prefer the private item; fall back to the versioned item
		FolderItem item = folderItem != null ? folderItem : versionedFolderItem;
		String pathname = newParent.getPathname();
		String contentType = item.getContentType();
		// newParent may assign an altered name to avoid a collision
		String targetName = newParent.getTargetName(name);
		String user = ClientUtil.getUserName();
		try {
			if (item instanceof DatabaseItem) {
				// Database-backed content: copy via buffer file with a fresh fileID
				BufferFile bufferFile = ((DatabaseItem) item).open();
				try {
					newParent.getLocalFileSystem()
							.createDatabase(pathname, targetName, FileIDFactory.createFileID(),
								bufferFile, null, contentType, true, monitor, user);
				}
				finally {
					bufferFile.dispose();
				}
			}
			else if (item instanceof DataFileItem) {
				// Plain data file: stream-copy the content
				InputStream istream = ((DataFileItem) item).getInputStream();
				try {
					newParent.getLocalFileSystem()
							.createDataFile(pathname, targetName, istream, null, contentType,
								monitor);
				}
				finally {
					istream.close();
				}
			}
			else {
				throw new AssertException("Unknown Item in copyTo");
			}
		}
		catch (InvalidNameException e) {
			throw new AssertException("Unexpected error", e);
		}
		newParent.fileChanged(targetName);
		return newParent.getDomainFile(targetName);
	}
}
/**
 * Copy a specific version of this file to the specified destFolder.
 * The copy is named "&lt;name&gt;_v&lt;version&gt;" (possibly adjusted to avoid collision).
 * @param version version to copy
 * @param destFolder destination parent folder
 * @param monitor task monitor
 * @return the copied file, or null if this file is not versioned
 * @throws IOException if an IO or access error occurs.
 * @throws CancelledException if task monitor cancelled operation.
 */
GhidraFile copyVersionTo(int version, GhidraFolderData destFolder, TaskMonitor monitor)
		throws IOException, CancelledException {
	synchronized (fileSystem) {
		if (destFolder.getLocalFileSystem().isReadOnly()) {
			throw new ReadOnlyException("copyVersionTo permitted to writeable project");
		}
		if (versionedFolderItem == null) {
			return null; // NOTE: versioned file system may be offline
		}
		// Only database-backed content supports version-specific copy
		if (!(versionedFolderItem instanceof DatabaseItem)) {
			throw new IOException("unsupported operation");
		}
		String pathname = destFolder.getPathname();
		String contentType = versionedFolderItem.getContentType();
		String targetName = destFolder.getTargetName(name + "_v" + version);
		String user = ClientUtil.getUserName();
		try {
			BufferFile bufferFile = ((DatabaseItem) versionedFolderItem).open(version);
			if (bufferFile == null) {
				return null; // TODO: not sure this can ever happen - IOException will probably occur instead
			}
			try {
				// Copy receives a fresh fileID since it is a new independent file
				destFolder.getLocalFileSystem()
						.createDatabase(pathname, targetName, FileIDFactory.createFileID(),
							bufferFile, null, contentType, true, monitor, user);
			}
			finally {
				bufferFile.dispose();
			}
		}
		catch (InvalidNameException e) {
			throw new AssertException("Unexpected error", e);
		}
		destFolder.fileChanged(targetName);
		return destFolder.getDomainFile(targetName);
	}
}
/**
 * Copy this file to make a private file if it is versioned. This method should be called
 * only when a non shared project is being converted to a shared project.
 * @param monitor task monitor
 * @throws IOException if an IO error occurs
 * @throws CancelledException if task is cancelled
 */
void convertToPrivateFile(TaskMonitor monitor) throws IOException, CancelledException {
	synchronized (fileSystem) {
		// Conversion only applies when versioning is backed by a local filesystem
		if (!(versionedFileSystem instanceof LocalFileSystem)) {
			throw new UnsupportedOperationException("not supported for project");
		}
		if (!isVersioned()) {
			return;
		}
		GhidraFolderData oldParent = getParent();
		if (isCheckedOut()) {
			// keep local changed file - discard revision information
			folderItem.clearCheckout();
			oldParent.fileChanged(name);
		}
		else {
			// copy this file to make a private copy, then remove all versions and
			// rename the copy back to the original name
			GhidraFile df = copyTo(oldParent, monitor);
			versionedFolderItem.delete(-1, ClientUtil.getUserName());
			oldParent.fileChanged(name);
			try {
				df.setName(name);
			}
			catch (InvalidNameException e) {
				throw new AssertException("Unexpected error", e);
			}
		}
	}
}
/**
 * Pack domain file into specified file.
 * Specified file will be overwritten if it already exists.
 * @param file destination file
 * @param monitor the task monitor
 * @throws IOException if there is an exception packing the file
 * @throws CancelledException if monitor cancels operation
 */
void packFile(File file, TaskMonitor monitor) throws IOException, CancelledException {
	synchronized (fileSystem) {
		// Prefer the private item; fall back to the versioned item
		FolderItem item = versionedFolderItem;
		if (folderItem != null) {
			item = folderItem;
		}
		item.output(file, FolderItem.LATEST_VERSION, monitor);
	}
}
/**
 * Returns the length of this domain file. This size is the minimum disk space
 * used for storing this file, but does not account for additional storage space
 * used to track changes, etc.
 * @return file length, or 0 if neither a private nor versioned item exists
 * @throws IOException if IO or access error occurs
 */
long length() throws IOException {
	synchronized (fileSystem) {
		FolderItem item = (folderItem != null) ? folderItem : versionedFolderItem;
		return (item != null) ? item.length() : 0;
	}
}
/**
 * Returns an ordered map containing the metadata that has been associated with the
 * corresponding domain object. The map contains key,value pairs and are ordered by their
 * insertion order.
 * @return a map containing the metadata that has been associated with the corresponding domain
 * object.
 */
Map<String, String> getMetadata() {
	// Prefer the private item; fall back to the versioned item
	FolderItem item = folderItem;
	if (item == null) {
		item = versionedFolderItem;
	}
	return getMetadata(item);
}
/**
 * Returns an ordered map containing the metadata stored within a specific {@link FolderItem}
 * database. The map contains key,value pairs and are ordered by their insertion order.
 * @param item folder item whose metadata should be read
 * @return a map containing the metadata that has been associated with the corresponding domain
 * object. Map will be empty for a non-database item or on any read failure.
 */
static Map<String, String> getMetadata(FolderItem item) {
	if (!(item instanceof DatabaseItem databaseItem)) {
		return new HashMap<>();
	}
	ManagedBufferFile bf = null;
	DBHandle dbh = null;
	GenericDomainObjectDB genericDomainObj = null;
	try {
		// Open the database generically (content-type agnostic) just to read metadata
		bf = databaseItem.open();
		dbh = new DBHandle(bf);
		genericDomainObj = new GenericDomainObjectDB(dbh);
		return genericDomainObj.getMetadata();
	}
	catch (FileNotFoundException e) {
		// file has been deleted, just return an empty map.
	}
	catch (Field.UnsupportedFieldException e) {
		// file created with newer version of Ghidra
	}
	catch (IOException e) {
		Msg.error(GhidraFileData.class, "Read meta-data error", e);
	}
	finally {
		// Release in reverse-acquisition order; each may be null on early failure
		if (genericDomainObj != null) {
			genericDomainObj.release();
		}
		if (dbh != null) {
			dbh.close();
		}
		if (bf != null) {
			bf.dispose();
		}
	}
	return new HashMap<>();
}
/**
 * String form is "&lt;projectName&gt;:&lt;pathname&gt;" for a regular project, or
 * "&lt;projectName&gt;&lt;pathname&gt;" (no separator) for a transient project, or
 * "&lt;name&gt;(disposed)" once the project data has been released.
 */
@Override
public String toString() {
	if (projectData == null) {
		return name + "(disposed)";
	}
	// Reuse the fetched locator instead of repeatedly calling
	// projectData.getProjectLocator() (original fetched it three times)
	ProjectLocator projectLocator = projectData.getProjectLocator();
	if (projectLocator.isTransient()) {
		return projectLocator.getName() + getPathname();
	}
	return projectLocator.getName() + ":" + getPathname();
}
/**
 * Minimal domain object wrapper used solely to read metadata from an arbitrary
 * domain object database without knowledge of its specific content type.
 */
private static class GenericDomainObjectDB extends DomainObjectAdapterDB {

	protected GenericDomainObjectDB(DBHandle dbh) throws IOException {
		// NOTE(review): the handle also serves as the consumer object; 500 is
		// presumably the superclass time-interval parameter - confirm against
		// DomainObjectAdapterDB before relying on it
		super(dbh, "Generic", 500, dbh);
		loadMetadata();
	}

	@Override
	public String getDescription() {
		return "Generic Database Domain Object";
	}

	// Read-only: metadata access never modifies the underlying database
	@Override
	public boolean isChangeable() {
		return false;
	}

	// Release this object's consumer reference (the handle itself)
	public void release() {
		release(dbh);
	}
}
}
/**
 * {@link VersionIcon} is the base icon for files which exist within the versioned filesystem.
 */
class VersionIcon implements Icon {

	// Theme-managed background color; made final since it is assigned once (constant idiom)
	private static final Color VERSION_ICON_COLOR = new GColor("color.bg.icon.versioned");

	private static final int WIDTH = GhidraFileData.ICON_WIDTH;
	private static final int HEIGHT = GhidraFileData.ICON_HEIGHT;

	@Override
	public int getIconHeight() {
		return HEIGHT;
	}

	@Override
	public int getIconWidth() {
		return WIDTH;
	}

	/**
	 * Paints a filled rectangle whose edge lines stop one pixel short at each end,
	 * leaving the four corner pixels unpainted (rounded-corner effect).
	 */
	@Override
	public void paintIcon(Component c, Graphics g, int x, int y) {
		g.setColor(VERSION_ICON_COLOR);
		g.fillRect(x + 1, y + 1, WIDTH - 2, HEIGHT - 2);
		g.drawLine(x + 1, y, x + WIDTH - 2, y);
		g.drawLine(x + WIDTH - 1, y + 1, x + WIDTH - 1, y + HEIGHT - 2);
		g.drawLine(x + 1, y + HEIGHT - 1, x + WIDTH - 2, y + HEIGHT - 1);
		g.drawLine(x, y + 1, x, y + HEIGHT - 2);
	}
}
| NationalSecurityAgency/ghidra | Ghidra/Framework/Project/src/main/java/ghidra/framework/data/GhidraFileData.java |
1,433 | /*
* This file is part of WebGoat, an Open Web Application Security Project utility. For details, please see http://www.owasp.org/
*
* Copyright (c) 2002 - 2021 Bruce Mayhew
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU General Public License as published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with this program; if
* not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*
* Getting Source
* ==============
*
* Source for this application is maintained at https://github.com/WebGoat/WebGoat, a repository for free software projects.
*/
package org.owasp.webgoat.lessons.hijacksession;
import org.owasp.webgoat.container.lessons.Category;
import org.owasp.webgoat.container.lessons.Lesson;
import org.springframework.stereotype.Component;
/**
 * Lesson registration bean for the WebGoat Session Hijacking lesson.
 *
 * @author Angel Olle Blazquez
 */
@Component
public class HijackSession extends Lesson {

  // Menu category under which this lesson is listed
  @Override
  public Category getDefaultCategory() {
    return Category.A1;
  }

  // Message-bundle key resolved to the localized lesson title
  @Override
  public String getTitle() {
    return "hijacksession.title";
  }
}
| endpointlabs/WebGoat | src/main/java/org/owasp/webgoat/lessons/hijacksession/HijackSession.java |
1,434 | package org.testng;
/**
 * A parameter of this type will be passed to the run() method of a IHookable. Invoking
 * runTestMethod() on that parameter will cause the test method currently being diverted to be
 * invoked.
 *
 * <p><b>This interface is not meant to be implemented by clients, only by TestNG.</b>
 *
 * @see org.testng.IHookable
 * @author cbeust Jan 28, 2006
 */
public interface IHookCallBack {

  /**
   * Invoke the test method currently being hijacked.
   *
   * @param testResult The test result that will be populated for the invoked test method
   */
  void runTestMethod(ITestResult testResult);

  /** @return the parameters that will be used to invoke the test method. */
  Object[] getParameters();
}
| testng-team/testng | testng-core-api/src/main/java/org/testng/IHookCallBack.java |
1,435 | package org.testng;
/**
 * A parameter of this type will be passed to the run() method of a IConfigurable. Invoking
 * runConfigurationMethod() on that parameter will cause the configuration method currently being
 * diverted to be invoked.
 *
 * <p><b>This interface is not meant to be implemented by clients, only by TestNG.</b>
 *
 * @see org.testng.IConfigurable
 * @author cbeust Sep 07, 2010
 */
public interface IConfigureCallBack {

  /**
   * Invoke the configuration method currently being hijacked.
   *
   * @param testResult The test result that will be populated for the invoked method
   */
  void runConfigurationMethod(ITestResult testResult);

  /** @return the parameters that will be used to invoke the configuration method. */
  Object[] getParameters();
}
| testng-team/testng | testng-core-api/src/main/java/org/testng/IConfigureCallBack.java |
1,436 | /*
* This file is part of WebGoat, an Open Web Application Security Project utility. For details, please see http://www.owasp.org/
*
* Copyright (c) 2002 - 2021 Bruce Mayhew
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU General Public License as published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with this program; if
* not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*
* Getting Source ==============
*
* Source for this application is maintained at https://github.com/WebGoat/WebGoat, a repository for free software projects.
*/
package org.owasp.webgoat.lessons.hijacksession;
import jakarta.servlet.http.Cookie;
import jakarta.servlet.http.HttpServletResponse;
import org.apache.commons.lang3.StringUtils;
import org.owasp.webgoat.container.assignments.AssignmentEndpoint;
import org.owasp.webgoat.container.assignments.AssignmentHints;
import org.owasp.webgoat.container.assignments.AttackResult;
import org.owasp.webgoat.lessons.hijacksession.cas.Authentication;
import org.owasp.webgoat.lessons.hijacksession.cas.HijackSessionAuthenticationProvider;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.CookieValue;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
/**
 * Assignment endpoint for the WebGoat Session Hijacking lesson. Issues a session
 * cookie and then accepts that cookie alone as proof of identity.
 *
 * <p>NOTE(review): apparent weaknesses here (cookie-only authentication, no HttpOnly
 * flag on the cookie) are presumably intentional lesson content - confirm against the
 * lesson design before "hardening".
 *
 * @author Angel Olle Blazquez
 */
@RestController
@AssignmentHints({
  "hijacksession.hints.1",
  "hijacksession.hints.2",
  "hijacksession.hints.3",
  "hijacksession.hints.4",
  "hijacksession.hints.5"
})
public class HijackSessionAssignment extends AssignmentEndpoint {

  // Name of the session cookie this lesson issues and later trusts
  private static final String COOKIE_NAME = "hijack_cookie";

  @Autowired HijackSessionAuthenticationProvider provider;

  /**
   * Authenticates either by username/password (no cookie yet) or by an existing
   * session cookie. Succeeds when the provider reports the authentication as valid.
   */
  @PostMapping(path = "/HijackSession/login")
  @ResponseBody
  public AttackResult login(
      @RequestParam String username,
      @RequestParam String password,
      @CookieValue(value = COOKIE_NAME, required = false) String cookieValue,
      HttpServletResponse response) {
    Authentication authentication;
    if (StringUtils.isEmpty(cookieValue)) {
      // First visit: authenticate with credentials and hand out a session-id cookie
      authentication =
          provider.authenticate(
              Authentication.builder().name(username).credentials(password).build());
      setCookie(response, authentication.getId());
    } else {
      // Cookie present: authenticate by session id alone (the hijack vector)
      authentication = provider.authenticate(Authentication.builder().id(cookieValue).build());
    }

    if (authentication.isAuthenticated()) {
      return success(this).build();
    }
    return failed(this).build();
  }

  // Cookie is scoped to /WebGoat and marked Secure; HttpOnly is not set
  private void setCookie(HttpServletResponse response, String cookieValue) {
    Cookie cookie = new Cookie(COOKIE_NAME, cookieValue);
    cookie.setPath("/WebGoat");
    cookie.setSecure(true);
    response.addCookie(cookie);
  }
}
| endpointlabs/WebGoat | src/main/java/org/owasp/webgoat/lessons/hijacksession/HijackSessionAssignment.java |
1,437 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jooq.tools;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* <p>
* Operations on {@link java.lang.String} that are <code>null</code> safe.
* </p>
* <ul>
* <li><b>IsEmpty/IsBlank</b> - checks if a String contains text</li>
* <li><b>Trim/Strip</b> - removes leading and trailing whitespace</li>
* <li><b>Equals</b> - compares two strings null-safe</li>
* <li><b>startsWith</b> - check if a String starts with a prefix null-safe</li>
* <li><b>endsWith</b> - check if a String ends with a suffix null-safe</li>
* <li><b>IndexOf/LastIndexOf/Contains</b> - null-safe index-of checks
* <li><b>IndexOfAny/LastIndexOfAny/IndexOfAnyBut/LastIndexOfAnyBut</b> -
* index-of any of a set of Strings</li>
* <li><b>ContainsOnly/ContainsNone/ContainsAny</b> - does String contains
* only/none/any of these characters</li>
* <li><b>Substring/Left/Right/Mid</b> - null-safe substring extractions</li>
* <li><b>SubstringBefore/SubstringAfter/SubstringBetween</b> - substring
* extraction relative to other strings</li>
* <li><b>Split/Join</b> - splits a String into an array of substrings and vice
* versa</li>
* <li><b>Remove/Delete</b> - removes part of a String</li>
* <li><b>Replace/Overlay</b> - Searches a String and replaces one String with
* another</li>
* <li><b>Chomp/Chop</b> - removes the last part of a String</li>
* <li><b>LeftPad/RightPad/Center/Repeat</b> - pads a String</li>
* <li><b>UpperCase/LowerCase/SwapCase/Capitalize/Uncapitalize</b> - changes the
* case of a String</li>
* <li><b>CountMatches</b> - counts the number of occurrences of one String in
* another</li>
* <li><b>IsAlpha/IsNumeric/IsWhitespace/IsAsciiPrintable</b> - checks the
* characters in a String</li>
* <li><b>DefaultString</b> - protects against a null input String</li>
* <li><b>Reverse/ReverseDelimited</b> - reverses a String</li>
* <li><b>Abbreviate</b> - abbreviates a string using ellipsis</li>
* <li><b>Difference</b> - compares Strings and reports on their differences</li>
* <li><b>LevensteinDistance</b> - the number of changes needed to change one
* String into another</li>
* </ul>
* <p>
* The <code>StringUtils</code> class defines certain words related to String
* handling.
* </p>
* <ul>
* <li>null - <code>null</code></li>
* <li>empty - a zero-length string (<code>""</code>)</li>
* <li>space - the space character (<code>' '</code>, char 32)</li>
* <li>whitespace - the characters defined by
* {@link Character#isWhitespace(char)}</li>
* <li>trim - the characters <= 32 as in {@link String#trim()}</li>
* </ul>
* <p>
* <code>StringUtils</code> handles <code>null</code> input Strings quietly.
* That is to say that a <code>null</code> input will return <code>null</code>.
* Where a <code>boolean</code> or <code>int</code> is being returned details
* vary by method.
* </p>
* <p>
* A side effect of the <code>null</code> handling is that a
* <code>NullPointerException</code> should be considered a bug in
* <code>StringUtils</code> (except for deprecated methods).
* </p>
* <p>
* Methods in this class give sample code to explain their operation. The symbol
* <code>*</code> is used to indicate any input including <code>null</code>.
* </p>
*
* @see java.lang.String
* @author Apache Software Foundation
* @author <a href="http://jakarta.apache.org/turbine/">Apache Jakarta
* Turbine</a>
* @author <a href="mailto:[email protected]">Jon S. Stevens</a>
* @author Daniel L. Rall
* @author <a href="mailto:[email protected]">Greg Coladonato</a>
* @author <a href="mailto:[email protected]">Ed Korthof</a>
* @author <a href="mailto:[email protected]">Rand McNeely</a>
* @author <a href="mailto:[email protected]">Fredrik Westermarck</a>
* @author Holger Krauth
* @author <a href="mailto:[email protected]">Alexander Day Chaffee</a>
* @author <a href="mailto:[email protected]">Henning P. Schmiedehausen</a>
* @author Arun Mammen Thomas
* @author Gary Gregory
* @author Phil Steitz
* @author Al Chou
* @author Michael Davey
* @author Reuben Sivan
* @author Chris Hyzer
* @author Scott Johnson
* @since 1.0
* @version $Id: StringUtils.java 911986 2010-02-19 21:19:05Z niallp $
*/
public final class StringUtils {
/**
* The empty String {@code ""}.
* @since 2.0
*/
public static final String EMPTY = "";
/**
* Represents a failed index search.
* @since 2.1
*/
public static final int INDEX_NOT_FOUND = -1;
/**
* <p>The maximum size to which the padding constant(s) can expand.</p>
*/
private static final int PAD_LIMIT = 8192;
// Defaults
// -----------------------------------------------------------------------
/**
* <p>
* Returns either the passed in String, or if the String is
* <code>null</code>, an empty String ("").
* </p>
*
* <pre>
* StringUtils.defaultString(null) = ""
* StringUtils.defaultString("") = ""
* StringUtils.defaultString("bat") = "bat"
* </pre>
*
* @see String#valueOf(Object)
* @param str the String to check, may be null
* @return the passed in String, or the empty String if it was
* <code>null</code>
*/
public static String defaultString(String str) {
return str == null ? "" : str;
}
/**
* <p>Returns either the passed in String, or if the String is
* <code>null</code>, the value of <code>defaultStr</code>.</p>
*
* <pre>
* StringUtils.defaultString(null, "NULL") = "NULL"
* StringUtils.defaultString("", "NULL") = ""
* StringUtils.defaultString("bat", "NULL") = "bat"
* </pre>
*
* @see String#valueOf(Object)
* @param str the String to check, may be null
* @param defaultStr the default String to return
* if the input is <code>null</code>, may be null
* @return the passed in String, or the default if it was <code>null</code>
*/
public static String defaultString(String str, String defaultStr) {
return str == null ? defaultStr : str;
}
/**
* <p>Returns either the passed in String, or if the String is
* empty or <code>null</code>, the value of <code>defaultStr</code>.</p>
*
* <pre>
* StringUtils.defaultIfEmpty(null, "NULL") = "NULL"
* StringUtils.defaultIfEmpty("", "NULL") = "NULL"
* StringUtils.defaultIfEmpty("bat", "NULL") = "bat"
* </pre>
*
* @see StringUtils#defaultString(String, String)
* @param str the String to check, may be null
* @param defaultStr the default String to return
* if the input is empty ("") or <code>null</code>, may be null
* @return the passed in String, or the default
*/
public static String defaultIfEmpty(String str, String defaultStr) {
return StringUtils.isEmpty(str) ? defaultStr : str;
}
/**
* <p>Returns either the passed in CharSequence, or if the CharSequence is
* whitespace, empty ("") or {@code null}, the value of {@code defaultStr}.</p>
*
* <pre>
* StringUtils.defaultIfBlank(null, "NULL") = "NULL"
* StringUtils.defaultIfBlank("", "NULL") = "NULL"
* StringUtils.defaultIfBlank(" ", "NULL") = "NULL"
* StringUtils.defaultIfBlank("bat", "NULL") = "bat"
* StringUtils.defaultIfBlank("", null) = null
* </pre>
* @param str the CharSequence to check, may be null
* @param defaultStr the default CharSequence to return
* if the input is whitespace, empty ("") or {@code null}, may be null
* @return the passed in CharSequence, or the default
* @see StringUtils#defaultString(String, String)
*/
public static String defaultIfBlank(String str, String defaultStr) {
return StringUtils.isBlank(str) ? defaultStr : str;
}
// Empty checks
// -----------------------------------------------------------------------
/**
* <p>
* Checks if a String is empty ("") or null.
* </p>
*
* <pre>
* StringUtils.isEmpty(null) = true
* StringUtils.isEmpty("") = true
* StringUtils.isEmpty(" ") = false
* StringUtils.isEmpty("bob") = false
* StringUtils.isEmpty(" bob ") = false
* </pre>
* <p>
* NOTE: This method changed in Lang version 2.0. It no longer trims the
* String. That functionality is available in isBlank().
* </p>
*
* @param str the String to check, may be null
* @return <code>true</code> if the String is empty or null
*/
public static boolean isEmpty(String str) {
return str == null || str.length() == 0;
}
/**
* <p>
* Checks if a String is whitespace, empty ("") or null.
* </p>
*
* <pre>
* StringUtils.isBlank(null) = true
* StringUtils.isBlank("") = true
* StringUtils.isBlank(" ") = true
* StringUtils.isBlank("bob") = false
* StringUtils.isBlank(" bob ") = false
* </pre>
*
* @param str the String to check, may be null
* @return <code>true</code> if the String is null, empty or whitespace
* @since 2.0
*/
public static boolean isBlank(String str) {
int strLen;
if (str == null || (strLen = str.length()) == 0) {
return true;
}
for (int i = 0; i < strLen; i++) {
if ((Character.isWhitespace(str.charAt(i)) == false)) {
return false;
}
}
return true;
}
// Count matches
// -----------------------------------------------------------------------
/**
* <p>
* Counts how many times the substring appears in the larger String.
* </p>
* <p>
* A <code>null</code> or empty ("") String input returns <code>0</code>.
* </p>
*
* <pre>
* StringUtils.countMatches(null, *) = 0
* StringUtils.countMatches("", *) = 0
* StringUtils.countMatches("abba", null) = 0
* StringUtils.countMatches("abba", "") = 0
* StringUtils.countMatches("abba", "a") = 2
* StringUtils.countMatches("abba", "ab") = 1
* StringUtils.countMatches("abba", "xxx") = 0
* </pre>
*
* @param str the String to check, may be null
* @param sub the substring to count, may be null
* @return the number of occurrences, 0 if either String is
* <code>null</code>
*/
public static int countMatches(String str, String sub) {
if (isEmpty(str) || isEmpty(sub)) {
return 0;
}
int count = 0;
int idx = 0;
while ((idx = str.indexOf(sub, idx)) != -1) {
count++;
idx += sub.length();
}
return count;
}
// Padding
// -----------------------------------------------------------------------
/**
* <p>Right pad a String with spaces (' ').</p>
*
* <p>The String is padded to the size of <code>size</code>.</p>
*
* <pre>
* StringUtils.rightPad(null, *) = null
* StringUtils.rightPad("", 3) = " "
* StringUtils.rightPad("bat", 3) = "bat"
* StringUtils.rightPad("bat", 5) = "bat "
* StringUtils.rightPad("bat", 1) = "bat"
* StringUtils.rightPad("bat", -1) = "bat"
* </pre>
*
* @param str the String to pad out, may be null
* @param size the size to pad to
* @return right padded String or original String if no padding is necessary,
* <code>null</code> if null String input
*/
public static String rightPad(String str, int size) {
return rightPad(str, size, ' ');
}
public static String rightPad(String str, int strSize, int size) {
return rightPad(str, strSize, size, ' ');
}
/**
* <p>Right pad a String with a specified character.</p>
*
* <p>The String is padded to the size of <code>size</code>.</p>
*
* <pre>
* StringUtils.rightPad(null, *, *) = null
* StringUtils.rightPad("", 3, 'z') = "zzz"
* StringUtils.rightPad("bat", 3, 'z') = "bat"
* StringUtils.rightPad("bat", 5, 'z') = "batzz"
* StringUtils.rightPad("bat", 1, 'z') = "bat"
* StringUtils.rightPad("bat", -1, 'z') = "bat"
* </pre>
*
* @param str the String to pad out, may be null
* @param size the size to pad to
* @param padChar the character to pad with
* @return right padded String or original String if no padding is necessary,
* <code>null</code> if null String input
* @since 2.0
*/
public static String rightPad(String str, int size, char padChar) {
if (str == null)
return null;
return rightPad(str, str.length(), size, padChar);
}
public static String rightPad(String str, int strSize, int size, char padChar) {
if (str == null) {
return null;
}
int pads = size - strSize;
if (pads <= 0) {
return str; // returns original String when possible
}
if (pads > PAD_LIMIT) {
return rightPad(str, strSize, size, String.valueOf(padChar));
}
return str.concat(padding(pads, padChar));
}
/**
* <p>Right pad a String with a specified String.</p>
*
* <p>The String is padded to the size of <code>size</code>.</p>
*
* <pre>
* StringUtils.rightPad(null, *, *) = null
* StringUtils.rightPad("", 3, "z") = "zzz"
* StringUtils.rightPad("bat", 3, "yz") = "bat"
* StringUtils.rightPad("bat", 5, "yz") = "batyz"
* StringUtils.rightPad("bat", 8, "yz") = "batyzyzy"
* StringUtils.rightPad("bat", 1, "yz") = "bat"
* StringUtils.rightPad("bat", -1, "yz") = "bat"
* StringUtils.rightPad("bat", 5, null) = "bat "
* StringUtils.rightPad("bat", 5, "") = "bat "
* </pre>
*
* @param str the String to pad out, may be null
* @param size the size to pad to
* @param padStr the String to pad with, null or empty treated as single space
* @return right padded String or original String if no padding is necessary,
* <code>null</code> if null String input
*/
public static String rightPad(String str, int size, String padStr) {
if (str == null)
return null;
return rightPad(str, str.length(), size, padStr);
}
public static String rightPad(String str, int strSize, int size, String padStr) {
if (str == null) {
return null;
}
if (isEmpty(padStr)) {
padStr = " ";
}
int padLen = padStr.length();
int strLen = strSize;
int pads = size - strLen;
if (pads <= 0) {
return str; // returns original String when possible
}
if (padLen == 1 && pads <= PAD_LIMIT) {
return rightPad(str, size, padStr.charAt(0));
}
if (pads == padLen) {
return str.concat(padStr);
} else if (pads < padLen) {
return str.concat(padStr.substring(0, pads));
} else {
char[] padding = new char[pads];
char[] padChars = padStr.toCharArray();
for (int i = 0; i < pads; i++) {
padding[i] = padChars[i % padLen];
}
return str.concat(new String(padding));
}
}
/**
* <p>Left pad a String with spaces (' ').</p>
*
* <p>The String is padded to the size of <code>size</code>.</p>
*
* <pre>
* StringUtils.leftPad(null, *) = null
* StringUtils.leftPad("", 3) = " "
* StringUtils.leftPad("bat", 3) = "bat"
* StringUtils.leftPad("bat", 5) = " bat"
* StringUtils.leftPad("bat", 1) = "bat"
* StringUtils.leftPad("bat", -1) = "bat"
* </pre>
*
* @param str the String to pad out, may be null
* @param size the size to pad to
* @return left padded String or original String if no padding is necessary,
* <code>null</code> if null String input
*/
public static String leftPad(String str, int size) {
return leftPad(str, size, ' ');
}
public static String leftPad(String str, int strSize, int size) {
return leftPad(str, strSize, size, ' ');
}
/**
* <p>Left pad a String with a specified character.</p>
*
* <p>Pad to a size of <code>size</code>.</p>
*
* <pre>
* StringUtils.leftPad(null, *, *) = null
* StringUtils.leftPad("", 3, 'z') = "zzz"
* StringUtils.leftPad("bat", 3, 'z') = "bat"
* StringUtils.leftPad("bat", 5, 'z') = "zzbat"
* StringUtils.leftPad("bat", 1, 'z') = "bat"
* StringUtils.leftPad("bat", -1, 'z') = "bat"
* </pre>
*
* @param str the String to pad out, may be null
* @param size the size to pad to
* @param padChar the character to pad with
* @return left padded String or original String if no padding is necessary,
* <code>null</code> if null String input
* @since 2.0
*/
public static String leftPad(String str, int size, char padChar) {
if (str == null)
return null;
return leftPad(str, str.length(), size, padChar);
}
public static String leftPad(String str, int strSize, int size, char padChar) {
if (str == null) {
return null;
}
int pads = size - strSize;
if (pads <= 0) {
return str; // returns original String when possible
}
if (pads > PAD_LIMIT) {
return leftPad(str, strSize, size, String.valueOf(padChar));
}
return padding(pads, padChar).concat(str);
}
/**
* <p>Left pad a String with a specified String.</p>
*
* <p>Pad to a size of <code>size</code>.</p>
*
* <pre>
* StringUtils.leftPad(null, *, *) = null
* StringUtils.leftPad("", 3, "z") = "zzz"
* StringUtils.leftPad("bat", 3, "yz") = "bat"
* StringUtils.leftPad("bat", 5, "yz") = "yzbat"
* StringUtils.leftPad("bat", 8, "yz") = "yzyzybat"
* StringUtils.leftPad("bat", 1, "yz") = "bat"
* StringUtils.leftPad("bat", -1, "yz") = "bat"
* StringUtils.leftPad("bat", 5, null) = " bat"
* StringUtils.leftPad("bat", 5, "") = " bat"
* </pre>
*
* @param str the String to pad out, may be null
* @param size the size to pad to
* @param padStr the String to pad with, null or empty treated as single space
* @return left padded String or original String if no padding is necessary,
* <code>null</code> if null String input
*/
public static String leftPad(String str, int size, String padStr) {
if (str == null)
return null;
return leftPad(str, str.length(), size, padStr);
}
public static String leftPad(String str, int strSize, int size, String padStr) {
if (str == null) {
return null;
}
if (isEmpty(padStr)) {
padStr = " ";
}
int padLen = padStr.length();
int strLen = strSize;
int pads = size - strLen;
if (pads <= 0) {
return str; // returns original String when possible
}
if (padLen == 1 && pads <= PAD_LIMIT) {
return leftPad(str, size, padStr.charAt(0));
}
if (pads == padLen) {
return padStr.concat(str);
} else if (pads < padLen) {
return padStr.substring(0, pads).concat(str);
} else {
char[] padding = new char[pads];
char[] padChars = padStr.toCharArray();
for (int i = 0; i < pads; i++) {
padding[i] = padChars[i % padLen];
}
return new String(padding).concat(str);
}
}
/**
* <p>Returns padding using the specified delimiter repeated
* to a given length.</p>
*
* <pre>
* StringUtils.padding(0, 'e') = ""
* StringUtils.padding(3, 'e') = "eee"
* StringUtils.padding(-2, 'e') = IndexOutOfBoundsException
* </pre>
*
* <p>Note: this method doesn't not support padding with
* <a href="http://www.unicode.org/glossary/#supplementary_character">Unicode Supplementary Characters</a>
* as they require a pair of <code>char</code>s to be represented.
* If you are needing to support full I18N of your applications
* consider using {@link #repeat(String, int)} instead.
* </p>
*
* @param repeat number of times to repeat delim
* @param padChar character to repeat
* @return String with repeated character
* @throws IndexOutOfBoundsException if <code>repeat < 0</code>
*/
private static String padding(int repeat, char padChar) throws IndexOutOfBoundsException {
if (repeat < 0) {
throw new IndexOutOfBoundsException("Cannot pad a negative amount: " + repeat);
}
final char[] buf = new char[repeat];
for (int i = 0; i < buf.length; i++) {
buf[i] = padChar;
}
return new String(buf);
}
// Abbreviating
//-----------------------------------------------------------------------
/**
* <p>Abbreviates a String using ellipses. This will turn
* "Now is the time for all good men" into "Now is the time for..."</p>
*
* <p>Specifically:
* <ul>
* <li>If <code>str</code> is less than <code>maxWidth</code> characters
* long, return it.</li>
* <li>Else abbreviate it to <code>(substring(str, 0, max-3) + "…")</code>.</li>
* <li>If <code>maxWidth</code> is less than <code>4</code>, throw an
* <code>IllegalArgumentException</code>.</li>
* <li>In no case will it return a String of length greater than
* <code>maxWidth</code>.</li>
* </ul>
* <pre>
* StringUtils.abbreviate(null, *) = null
* StringUtils.abbreviate("", 4) = ""
* StringUtils.abbreviate("abcdefg", 6) = "abc..."
* StringUtils.abbreviate("abcdefg", 7) = "abcdefg"
* StringUtils.abbreviate("abcdefg", 8) = "abcdefg"
* StringUtils.abbreviate("abcdefg", 4) = "a..."
* StringUtils.abbreviate("abcdefg", 3) = IllegalArgumentException
* </pre>
*
* @param str the String to check, may be null
* @param maxWidth maximum length of result String, must be at least 4
* @return abbreviated String, <code>null</code> if null String input
* @throws IllegalArgumentException if the width is too small
* @since 2.0
*/
public static String abbreviate(String str, int maxWidth) {
return abbreviate(str, 0, maxWidth);
}
/**
* <p>Abbreviates a String using ellipses. This will turn
* "Now is the time for all good men" into "...is the time for..."</p>
*
* <p>Works like <code>abbreviate(String, int)</code>, but allows you to specify
* a "left edge" offset. Note that this left edge is not necessarily going to
* be the leftmost character in the result, or the first character following the
* ellipses, but it will appear somewhere in the result.
*
* <p>In no case will it return a String of length greater than
* <code>maxWidth</code>.</p>
*
* <pre>
* StringUtils.abbreviate(null, *, *) = null
* StringUtils.abbreviate("", 0, 4) = ""
* StringUtils.abbreviate("abcdefghijklmno", -1, 10) = "abcdefg..."
* StringUtils.abbreviate("abcdefghijklmno", 0, 10) = "abcdefg..."
* StringUtils.abbreviate("abcdefghijklmno", 1, 10) = "abcdefg..."
* StringUtils.abbreviate("abcdefghijklmno", 4, 10) = "abcdefg..."
* StringUtils.abbreviate("abcdefghijklmno", 5, 10) = "...fghi..."
* StringUtils.abbreviate("abcdefghijklmno", 6, 10) = "...ghij..."
* StringUtils.abbreviate("abcdefghijklmno", 8, 10) = "...ijklmno"
* StringUtils.abbreviate("abcdefghijklmno", 10, 10) = "...ijklmno"
* StringUtils.abbreviate("abcdefghijklmno", 12, 10) = "...ijklmno"
* StringUtils.abbreviate("abcdefghij", 0, 3) = IllegalArgumentException
* StringUtils.abbreviate("abcdefghij", 5, 6) = IllegalArgumentException
* </pre>
*
* @param str the String to check, may be null
* @param offset left edge of source String
* @param maxWidth maximum length of result String, must be at least 4
* @return abbreviated String, <code>null</code> if null String input
* @throws IllegalArgumentException if the width is too small
* @since 2.0
*/
public static String abbreviate(String str, int offset, int maxWidth) {
if (str == null) {
return null;
}
if (maxWidth < 4) {
throw new IllegalArgumentException("Minimum abbreviation width is 4");
}
if (str.length() <= maxWidth) {
return str;
}
if (offset > str.length()) {
offset = str.length();
}
if ((str.length() - offset) < (maxWidth - 3)) {
offset = str.length() - (maxWidth - 3);
}
if (offset <= 4) {
return str.substring(0, maxWidth - 3) + "...";
}
if (maxWidth < 7) {
throw new IllegalArgumentException("Minimum abbreviation width with offset is 7");
}
if ((offset + (maxWidth - 3)) < str.length()) {
return "..." + abbreviate(str.substring(offset), maxWidth - 3);
}
return "..." + str.substring(str.length() - (maxWidth - 3));
}
// ContainsAny
//-----------------------------------------------------------------------
/**
* <p>Checks if the String contains any character in the given
* set of characters.</p>
*
* <p>A <code>null</code> String will return <code>false</code>.
* A <code>null</code> or zero length search array will return <code>false</code>.</p>
*
* <pre>
* StringUtils.containsAny(null, *) = false
* StringUtils.containsAny("", *) = false
* StringUtils.containsAny(*, null) = false
* StringUtils.containsAny(*, []) = false
* StringUtils.containsAny("zzabyycdxx",['z','a']) = true
* StringUtils.containsAny("zzabyycdxx",['b','y']) = true
* StringUtils.containsAny("aba", ['z']) = false
* </pre>
*
* @param str the String to check, may be null
* @param searchChars the chars to search for, may be null
* @return the <code>true</code> if any of the chars are found,
* <code>false</code> if no match or null input
* @since 2.4
*/
public static boolean containsAny(String str, char... searchChars) {
if (str == null || str.length() == 0 || searchChars == null || searchChars.length == 0) {
return false;
}
for (int i = 0; i < str.length(); i++) {
char ch = str.charAt(i);
for (int j = 0; j < searchChars.length; j++) {
if (searchChars[j] == ch) {
return true;
}
}
}
return false;
}
/**
* <p>Replaces all occurrences of a String within another String.</p>
*
* <p>A {@code null} reference passed to this method is a no-op.</p>
*
* <pre>
* StringUtils.replace(null, *, *) = null
* StringUtils.replace("", *, *) = ""
* StringUtils.replace("any", null, *) = "any"
* StringUtils.replace("any", *, null) = "any"
* StringUtils.replace("any", "", *) = "any"
* StringUtils.replace("aba", "a", null) = "aba"
* StringUtils.replace("aba", "a", "") = "b"
* StringUtils.replace("aba", "a", "z") = "zbz"
* </pre>
*
* @see #replace(String text, String searchString, String replacement, int max)
* @param text text to search and replace in, may be null
* @param searchString the String to search for, may be null
* @param replacement the String to replace it with, may be null
* @return the text with any replacements processed,
* {@code null} if null String input
*/
public static String replace(String text, String searchString, String replacement) {
return replace(text, searchString, replacement, -1);
}
/**
* <p>Replaces a String with another String inside a larger String,
* for the first {@code max} values of the search String.</p>
*
* <p>A {@code null} reference passed to this method is a no-op.</p>
*
* <pre>
* StringUtils.replace(null, *, *, *) = null
* StringUtils.replace("", *, *, *) = ""
* StringUtils.replace("any", null, *, *) = "any"
* StringUtils.replace("any", *, null, *) = "any"
* StringUtils.replace("any", "", *, *) = "any"
* StringUtils.replace("any", *, *, 0) = "any"
* StringUtils.replace("abaa", "a", null, -1) = "abaa"
* StringUtils.replace("abaa", "a", "", -1) = "b"
* StringUtils.replace("abaa", "a", "z", 0) = "abaa"
* StringUtils.replace("abaa", "a", "z", 1) = "zbaa"
* StringUtils.replace("abaa", "a", "z", 2) = "zbza"
* StringUtils.replace("abaa", "a", "z", -1) = "zbzz"
* </pre>
*
* @param text text to search and replace in, may be null
* @param searchString the String to search for, may be null
* @param replacement the String to replace it with, may be null
* @param max maximum number of values to replace, or {@code -1} if no maximum
* @return the text with any replacements processed,
* {@code null} if null String input
*/
public static String replace(String text, String searchString, String replacement, int max) {
if (isEmpty(text) || isEmpty(searchString) || replacement == null || max == 0) {
return text;
}
int start = 0;
int end = text.indexOf(searchString, start);
if (end == INDEX_NOT_FOUND) {
return text;
}
int replLength = searchString.length();
int increase = replacement.length() - replLength;
increase = (increase < 0 ? 0 : increase);
increase *= (max < 0 ? 16 : (max > 64 ? 64 : max));
StringBuilder buf = new StringBuilder(text.length() + increase);
while (end != INDEX_NOT_FOUND) {
buf.append(text, start, end).append(replacement);
start = end + replLength;
if (--max == 0) {
break;
}
end = text.indexOf(searchString, start);
}
buf.append(text, start, text.length());
return buf.toString();
}
/**
* <p>
* Replaces all occurrences of Strings within another String.
* </p>
*
* <p>
* A <code>null</code> reference passed to this method is a no-op, or if
* any "search string" or "string to replace" is null, that replace will be
* ignored. This will not repeat. For repeating replaces, call the
* overloaded method.
* </p>
*
* <pre>
* StringUtils.replaceEach(null, *, *) = null
* StringUtils.replaceEach("", *, *) = ""
* StringUtils.replaceEach("aba", null, null) = "aba"
* StringUtils.replaceEach("aba", new String[0], null) = "aba"
* StringUtils.replaceEach("aba", null, new String[0]) = "aba"
* StringUtils.replaceEach("aba", new String[]{"a"}, null) = "aba"
* StringUtils.replaceEach("aba", new String[]{"a"}, new String[]{""}) = "b"
* StringUtils.replaceEach("aba", new String[]{null}, new String[]{"a"}) = "aba"
* StringUtils.replaceEach("abcde", new String[]{"ab", "d"}, new String[]{"w", "t"}) = "wcte"
* (example of how it does not repeat)
* StringUtils.replaceEach("abcde", new String[]{"ab", "d"}, new String[]{"d", "t"}) = "dcte"
* </pre>
*
* @param text
* text to search and replace in, no-op if null
* @param searchList
* the Strings to search for, no-op if null
* @param replacementList
* the Strings to replace them with, no-op if null
* @return the text with any replacements processed, <code>null</code> if
* null String input
* @throws IndexOutOfBoundsException
* if the lengths of the arrays are not the same (null is ok,
* and/or size 0)
* @since 2.4
*/
public static String replaceEach(String text, String[] searchList, String[] replacementList) {
return replaceEach(text, searchList, replacementList, false, 0);
}
/**
* <p>
* Replaces all occurrences of Strings within another String.
* </p>
*
* <p>
* A <code>null</code> reference passed to this method is a no-op, or if
* any "search string" or "string to replace" is null, that replace will be
* ignored.
* </p>
*
* <pre>
* StringUtils.replaceEach(null, *, *, *) = null
* StringUtils.replaceEach("", *, *, *) = ""
* StringUtils.replaceEach("aba", null, null, *) = "aba"
* StringUtils.replaceEach("aba", new String[0], null, *) = "aba"
* StringUtils.replaceEach("aba", null, new String[0], *) = "aba"
* StringUtils.replaceEach("aba", new String[]{"a"}, null, *) = "aba"
* StringUtils.replaceEach("aba", new String[]{"a"}, new String[]{""}, *) = "b"
* StringUtils.replaceEach("aba", new String[]{null}, new String[]{"a"}, *) = "aba"
* StringUtils.replaceEach("abcde", new String[]{"ab", "d"}, new String[]{"w", "t"}, *) = "wcte"
* (example of how it repeats)
* StringUtils.replaceEach("abcde", new String[]{"ab", "d"}, new String[]{"d", "t"}, false) = "dcte"
* StringUtils.replaceEach("abcde", new String[]{"ab", "d"}, new String[]{"d", "t"}, true) = "tcte"
* StringUtils.replaceEach("abcde", new String[]{"ab", "d"}, new String[]{"d", "ab"}, *) = IllegalArgumentException
* </pre>
*
* @param text
* text to search and replace in, no-op if null
* @param searchList
* the Strings to search for, no-op if null
* @param replacementList
* the Strings to replace them with, no-op if null
* @param repeat if true, then replace repeatedly
* until there are no more possible replacements or timeToLive < 0
* @param timeToLive
* if less than 0 then there is a circular reference and endless
* loop
* @return the text with any replacements processed, <code>null</code> if
* null String input
* @throws IllegalArgumentException
* if the search is repeating and there is an endless loop due
* to outputs of one being inputs to another
* @throws IndexOutOfBoundsException
* if the lengths of the arrays are not the same (null is ok,
* and/or size 0)
* @since 2.4
*/
private static String replaceEach(String text, String[] searchList, String[] replacementList,
boolean repeat, int timeToLive)
{
// mchyzer Performance note: This creates very few new objects (one major goal)
// let me know if there are performance requests, we can create a harness to measure
if (text == null || text.length() == 0 || searchList == null ||
searchList.length == 0 || replacementList == null || replacementList.length == 0)
{
return text;
}
// if recursing, this shouldnt be less than 0
if (timeToLive < 0) {
throw new IllegalStateException("TimeToLive of " + timeToLive + " is less than 0: " + text);
}
int searchLength = searchList.length;
int replacementLength = replacementList.length;
// make sure lengths are ok, these need to be equal
if (searchLength != replacementLength) {
throw new IllegalArgumentException("Search and Replace array lengths don't match: "
+ searchLength
+ " vs "
+ replacementLength);
}
// keep track of which still have matches
boolean[] noMoreMatchesForReplIndex = new boolean[searchLength];
// index on index that the match was found
int textIndex = -1;
int replaceIndex = -1;
int tempIndex = -1;
// index of replace array that will replace the search string found
// NOTE: logic duplicated below START
for (int i = 0; i < searchLength; i++) {
if (noMoreMatchesForReplIndex[i] || searchList[i] == null ||
searchList[i].length() == 0 || replacementList[i] == null)
{
continue;
}
tempIndex = text.indexOf(searchList[i]);
// see if we need to keep searching for this
if (tempIndex == -1) {
noMoreMatchesForReplIndex[i] = true;
} else {
if (textIndex == -1 || tempIndex < textIndex) {
textIndex = tempIndex;
replaceIndex = i;
}
}
}
// NOTE: logic mostly below END
// no search strings found, we are done
if (textIndex == -1) {
return text;
}
int start = 0;
// get a good guess on the size of the result buffer so it doesnt have to double if it goes over a bit
int increase = 0;
// count the replacement text elements that are larger than their corresponding text being replaced
for (int i = 0; i < searchList.length; i++) {
int greater = replacementList[i].length() - searchList[i].length();
if (greater > 0) {
increase += 3 * greater; // assume 3 matches
}
}
// have upper-bound at 20% increase, then let Java take over
increase = Math.min(increase, text.length() / 5);
StringBuffer buf = new StringBuffer(text.length() + increase);
while (textIndex != -1) {
for (int i = start; i < textIndex; i++) {
buf.append(text.charAt(i));
}
buf.append(replacementList[replaceIndex]);
start = textIndex + searchList[replaceIndex].length();
textIndex = -1;
replaceIndex = -1;
tempIndex = -1;
// find the next earliest match
// NOTE: logic mostly duplicated above START
for (int i = 0; i < searchLength; i++) {
if (noMoreMatchesForReplIndex[i] || searchList[i] == null ||
searchList[i].length() == 0 || replacementList[i] == null)
{
continue;
}
tempIndex = text.indexOf(searchList[i], start);
// see if we need to keep searching for this
if (tempIndex == -1) {
noMoreMatchesForReplIndex[i] = true;
} else {
if (textIndex == -1 || tempIndex < textIndex) {
textIndex = tempIndex;
replaceIndex = i;
}
}
}
// NOTE: logic duplicated above END
}
int textLength = text.length();
for (int i = start; i < textLength; i++) {
buf.append(text.charAt(i));
}
String result = buf.toString();
if (!repeat) {
return result;
}
return replaceEach(result, searchList, replacementList, repeat, timeToLive - 1);
}
// Joining
//-----------------------------------------------------------------------
/**
* <p>Joins the elements of the provided array into a single String
* containing the provided list of elements.</p>
*
* <p>No separator is added to the joined String.
* Null objects or empty strings within the array are represented by
* empty strings.</p>
*
* <pre>
* StringUtils.join(null) = null
* StringUtils.join([]) = ""
* StringUtils.join([null]) = ""
* StringUtils.join(["a", "b", "c"]) = "abc"
* StringUtils.join([null, "", "a"]) = "a"
* </pre>
*
* @param <T> the specific type of values to join together
* @param elements the values to join together, may be null
* @return the joined String, {@code null} if null array input
* @since 2.0
* @since 3.0 Changed signature to use varargs
*/
@SafeVarargs
public static <T> String join(T... elements) {
return join(elements, null);
}
/**
* <p>Joins the elements of the provided array into a single String
* containing the provided list of elements.</p>
*
* <p>No delimiter is added before or after the list.
* Null objects or empty strings within the array are represented by
* empty strings.</p>
*
* <pre>
* StringUtils.join(null, *) = null
* StringUtils.join([], *) = ""
* StringUtils.join([null], *) = ""
* StringUtils.join(["a", "b", "c"], ';') = "a;b;c"
* StringUtils.join(["a", "b", "c"], null) = "abc"
* StringUtils.join([null, "", "a"], ';') = ";;a"
* </pre>
*
* @param array the array of values to join together, may be null
* @param separator the separator character to use
* @return the joined String, {@code null} if null array input
* @since 2.0
*/
public static String join(Object[] array, char separator) {
if (array == null) {
return null;
}
return join(array, separator, 0, array.length);
}
/**
 * <p>Joins the elements of the provided array into a single String
 * containing the provided list of elements.</p>
 *
 * <p>No delimiter is added before or after the list.
 * Null objects or empty strings within the array are represented by
 * empty strings.</p>
 *
 * <pre>
 * StringUtils.join(null, *)              = null
 * StringUtils.join([], *)                = ""
 * StringUtils.join([null], *)            = ""
 * StringUtils.join(["a", "b", "c"], ';') = "a;b;c"
 * StringUtils.join([null, "", "a"], ';') = ";;a"
 * </pre>
 *
 * @param array the array of values to join together, may be null
 * @param separator the separator character to use
 * @param startIndex the first index to start joining from. It is
 * an error to pass in an end index past the end of the array
 * @param endIndex the index to stop joining from (exclusive). It is
 * an error to pass in an end index past the end of the array
 * @return the joined String, {@code null} if null array input
 * @since 2.0
 */
public static String join(Object[] array, char separator, int startIndex, int endIndex) {
    if (array == null) {
        return null;
    }
    int count = endIndex - startIndex;
    if (count <= 0) {
        return EMPTY;
    }
    // Rough pre-sizing: assume ~16 chars per item to limit StringBuilder growth.
    StringBuilder sb = new StringBuilder(count * 16);
    for (int i = startIndex; i < endIndex; i++) {
        if (i > startIndex) {
            sb.append(separator);
        }
        Object item = array[i];
        // Null elements contribute nothing (rendered as empty string).
        if (item != null) {
            sb.append(item);
        }
    }
    return sb.toString();
}
/**
 * <p>Joins the elements of the provided array into a single String
 * containing the provided list of elements.</p>
 *
 * <p>No delimiter is added before or after the list.
 * A {@code null} separator is the same as an empty String ("").
 * Null objects or empty strings within the array are represented by
 * empty strings.</p>
 *
 * <pre>
 * StringUtils.join(null, *)                = null
 * StringUtils.join([], *)                  = ""
 * StringUtils.join([null], *)              = ""
 * StringUtils.join(["a", "b", "c"], "--")  = "a--b--c"
 * StringUtils.join(["a", "b", "c"], null)  = "abc"
 * StringUtils.join(["a", "b", "c"], "")    = "abc"
 * StringUtils.join([null, "", "a"], ',')   = ",,a"
 * </pre>
 *
 * @param array the array of values to join together, may be null
 * @param separator the separator character to use, null treated as ""
 * @return the joined String, {@code null} if null array input
 */
public static String join(Object[] array, String separator) {
    // Delegate to the ranged variant over the whole array; a null array yields null.
    return array == null ? null : join(array, separator, 0, array.length);
}
/**
 * <p>Joins the elements of the provided array into a single String
 * containing the provided list of elements.</p>
 *
 * <p>No delimiter is added before or after the list.
 * A {@code null} separator is the same as an empty String ("").
 * Null objects or empty strings within the array are represented by
 * empty strings.</p>
 *
 * <pre>
 * StringUtils.join(null, *)                = null
 * StringUtils.join([], *)                  = ""
 * StringUtils.join([null], *)              = ""
 * StringUtils.join(["a", "b", "c"], "--")  = "a--b--c"
 * StringUtils.join(["a", "b", "c"], null)  = "abc"
 * StringUtils.join(["a", "b", "c"], "")    = "abc"
 * StringUtils.join([null, "", "a"], ',')   = ",,a"
 * </pre>
 *
 * @param array the array of values to join together, may be null
 * @param separator the separator character to use, null treated as ""
 * @param startIndex the first index to start joining from. It is
 * an error to pass in an end index past the end of the array
 * @param endIndex the index to stop joining from (exclusive). It is
 * an error to pass in an end index past the end of the array
 * @return the joined String, {@code null} if null array input
 */
public static String join(Object[] array, String separator, int startIndex, int endIndex) {
    if (array == null) {
        return null;
    }
    // Normalise the separator: null behaves exactly like "".
    String sep = separator == null ? EMPTY : separator;
    int count = endIndex - startIndex;
    if (count <= 0) {
        return EMPTY;
    }
    // Rough pre-sizing: assume ~16 chars per item to limit StringBuilder growth.
    StringBuilder sb = new StringBuilder(count * 16);
    for (int i = startIndex; i < endIndex; i++) {
        if (i > startIndex) {
            sb.append(sep);
        }
        Object item = array[i];
        // Null elements contribute nothing (rendered as empty string).
        if (item != null) {
            sb.append(item);
        }
    }
    return sb.toString();
}
// Utility class: private constructor prevents instantiation.
private StringUtils() {}
// -------------------------------------------------------------------------
// XXX: The following methods are taken from ObjectUtils
// -------------------------------------------------------------------------
/**
 * <p>
 * Compares two strings for equality, where either one or both objects may
 * be {@code null}.
 * </p>
 *
 * <pre>
 * ObjectUtils.equals(null, null) = true
 * ObjectUtils.equals(null, "")   = false
 * ObjectUtils.equals("", null)   = false
 * ObjectUtils.equals("", "")     = true
 * </pre>
 *
 * @param o1 the first object, may be {@code null}
 * @param o2 the second object, may be {@code null}
 * @return {@code true} if the values of both objects are the same
 */
public static boolean equals(String o1, String o2) {
    // Two nulls are equal; a single null never equals a non-null string.
    if (o1 == null) {
        return o2 == null;
    }
    return o1.equals(o2);
}
/**
 * <p>
 * Compares two objects for deep equality, where either one or both objects
 * may be {@code null}. Arrays are compared element-wise; object arrays are
 * compared with {@link Arrays#deepEquals(Object[], Object[])}.
 * </p>
 *
 * <pre>
 * ObjectUtils.equals(null, null)                  = true
 * ObjectUtils.equals(null, "")                    = false
 * ObjectUtils.equals("", null)                    = false
 * ObjectUtils.equals("", "")                      = true
 * ObjectUtils.equals(Boolean.TRUE, null)          = false
 * ObjectUtils.equals(Boolean.TRUE, "true")        = false
 * ObjectUtils.equals(Boolean.TRUE, Boolean.TRUE)  = true
 * ObjectUtils.equals(Boolean.TRUE, Boolean.FALSE) = false
 * </pre>
 *
 * @param o1 the first object, may be {@code null}
 * @param o2 the second object, may be {@code null}
 * @return {@code true} if the values of both objects are the same
 */
public static boolean equals(Object o1, Object o2) {
    if (o1 == o2) {
        return true;
    }
    if (o1 == null || o2 == null) {
        return false;
    }
    // Non-array objects: plain equals().
    if (!o1.getClass().isArray()) {
        return o1.equals(o2);
    }
    // Arrays: dispatch to the matching Arrays.equals() overload.
    // Mismatched component types (e.g. int[] vs long[]) compare as unequal.
    if (o1 instanceof Object[] && o2 instanceof Object[]) {
        return Arrays.deepEquals((Object[]) o1, (Object[]) o2);
    }
    if (o1 instanceof byte[] && o2 instanceof byte[]) {
        return Arrays.equals((byte[]) o1, (byte[]) o2);
    }
    if (o1 instanceof short[] && o2 instanceof short[]) {
        return Arrays.equals((short[]) o1, (short[]) o2);
    }
    if (o1 instanceof int[] && o2 instanceof int[]) {
        return Arrays.equals((int[]) o1, (int[]) o2);
    }
    if (o1 instanceof long[] && o2 instanceof long[]) {
        return Arrays.equals((long[]) o1, (long[]) o2);
    }
    if (o1 instanceof float[] && o2 instanceof float[]) {
        return Arrays.equals((float[]) o1, (float[]) o2);
    }
    if (o1 instanceof double[] && o2 instanceof double[]) {
        return Arrays.equals((double[]) o1, (double[]) o2);
    }
    if (o1 instanceof char[] && o2 instanceof char[]) {
        return Arrays.equals((char[]) o1, (char[]) o2);
    }
    if (o1 instanceof boolean[] && o2 instanceof boolean[]) {
        return Arrays.equals((boolean[]) o1, (boolean[]) o2);
    }
    return false;
}
/**
 * <p>Returns a default value if the object passed is {@code null}.</p>
 *
 * <pre>
 * ObjectUtils.defaultIfNull(null, null)      = null
 * ObjectUtils.defaultIfNull(null, "")        = ""
 * ObjectUtils.defaultIfNull(null, "zz")      = "zz"
 * ObjectUtils.defaultIfNull("abc", *)        = "abc"
 * ObjectUtils.defaultIfNull(Boolean.TRUE, *) = Boolean.TRUE
 * </pre>
 *
 * @param <T> the type of the object
 * @param object the {@code Object} to test, may be {@code null}
 * @param defaultValue the default value to return, may be {@code null}
 * @return {@code object} if it is not {@code null}, defaultValue otherwise
 */
public static <T> T defaultIfNull(T object, T defaultValue) {
    if (object == null) {
        return defaultValue;
    }
    return object;
}
/**
 * <p>Returns the first non-{@code null} argument.</p>
 *
 * @param <T> the type of the objects
 * @param objects the elements to test, may not be {@code null} but empty
 * @return first non-{@code null} element in {@code objects}, otherwise {@code null}
 */
@SafeVarargs
public static <T> T firstNonNull(T... objects) {
    // Scan left to right, returning as soon as a non-null candidate is found.
    for (int i = 0; i < objects.length; i++) {
        T candidate = objects[i];
        if (candidate != null) {
            return candidate;
        }
    }
    return null;
}
// -------------------------------------------------------------------------
// XXX: The following methods are not part of Apache's commons-lang library
// -------------------------------------------------------------------------
/**
 * Convert an underscore-separated string to camel case, e.g.
 * {@code "hello_world"} becomes {@code "HelloWorld"}.
 */
public static String toCamelCase(String string) {
    StringBuilder sb = new StringBuilder();
    // [#2515] Split with limit -1 so trailing underscores survive as empty words.
    for (String word : string.split("_", -1)) {
        // A word with no characters (leading/trailing/double underscore):
        // keep the underscore itself.
        if (word.isEmpty()) {
            sb.append("_");
            continue;
        }
        // [#82] A word starting with a digit keeps its underscore to
        // prevent naming clashes.
        if (Character.isDigit(word.charAt(0))) {
            sb.append("_");
        }
        // Capitalise the first character, lower-case the remainder.
        sb.append(word.substring(0, 1).toUpperCase());
        sb.append(word.substring(1).toLowerCase());
    }
    return sb.toString();
}
/**
 * Convert a string to camel case starting with a lower case letter
 */
public static String toCamelCaseLC(String string) {
    String camelCase = toCamelCase(string);
    return toLC(camelCase);
}
/**
 * Change a string's first letter to lower case, using the default locale.
 */
public static String toLC(String string) {
    // Delegate to the locale-aware variant with the JVM default locale.
    Locale locale = Locale.getDefault();
    return toLC(string, locale);
}
/**
 * Change a string's first letter to lower case, using the given locale.
 * Null and empty inputs are returned unchanged.
 */
public static String toLC(String string, Locale locale) {
    if (string == null || string.isEmpty()) {
        return string;
    }
    String head = string.substring(0, 1).toLowerCase(locale);
    return head + string.substring(1);
}
/**
 * Change a string's first letter to upper case, using the default locale.
 */
public static String toUC(String string) {
    // Delegate to the locale-aware variant with the JVM default locale.
    Locale locale = Locale.getDefault();
    return toUC(string, locale);
}
/**
 * Change a string's first letter to upper case, using the given locale.
 * Null and empty inputs are returned unchanged.
 */
public static String toUC(String string, Locale locale) {
    if (string == null || string.isEmpty()) {
        return string;
    }
    String head = string.substring(0, 1).toUpperCase(locale);
    return head + string.substring(1);
}
/**
 * A custom adaptation of {@link Pattern#split(CharSequence, int)}.
 * <p>
 * Unlike the JDK version, the matched split-tokens are returned as well.
 * For example: <pre><code>
 * split("e", "hello world")    // ["h", "e", "llo world"]
 * split("o", "hello world")    // ["hell", "o", " w", "o", "rld"]
 * split("[eo]", "hello world") // ["h", "e", "ll", "o", " w", "o", "rld"]
 * </code></pre>
 * <p>
 * Empty segments are removed from the result.
 * NOTE(review): the result is typically an odd-length array (segments
 * alternating with tokens), but because empty segments are dropped, inputs
 * with adjacent or boundary matches may yield an even length — confirm
 * whether callers rely on oddness.
 */
public static String[] split(String regex, CharSequence input) {
    Matcher matcher = Pattern.compile(regex).matcher(input);
    ArrayList<String> parts = new ArrayList<>();
    int previousEnd = 0;
    // Collect [preceding-segment, matched-token] pairs for each match.
    while (matcher.find()) {
        parts.add(input.subSequence(previousEnd, matcher.start()).toString());
        parts.add(input.subSequence(matcher.start(), matcher.end()).toString());
        previousEnd = matcher.end();
    }
    // No match at all: the whole input is the single result element.
    if (previousEnd == 0) {
        return new String[] { input.toString() };
    }
    // Append whatever follows the last match.
    parts.add(input.subSequence(previousEnd, input.length()).toString());
    // Drop empty segments from the result.
    ArrayList<String> nonEmpty = new ArrayList<>();
    for (String part : parts) {
        if (!"".equals(part)) {
            nonEmpty.add(part);
        }
    }
    return nonEmpty.toArray(new String[nonEmpty.size()]);
}
}
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2017 by Hitachi Vantara : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans.steps.valuemapper;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.core.RowMetaAndData;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.exception.KettleValueException;
import org.pentaho.di.core.plugins.PluginRegistry;
import org.pentaho.di.core.plugins.StepPluginType;
import org.pentaho.di.core.row.RowMeta;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.ValueMetaInterface;
import org.pentaho.di.core.row.value.ValueMetaString;
import org.pentaho.di.trans.RowProducer;
import org.pentaho.di.trans.RowStepCollector;
import org.pentaho.di.trans.Trans;
import org.pentaho.di.trans.TransHopMeta;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.trans.step.StepInterface;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.steps.dummytrans.DummyTransMeta;
import org.pentaho.di.trans.steps.injector.InjectorMeta;
import junit.framework.TestCase;
/**
* Test class for the ValueMapper step.
*
* @author Sven Boden
*/
public class ValueMapperIT extends TestCase {
/**
 * Builds the input row metadata: a single String field named "field1".
 */
public RowMetaInterface createRowMetaInterface() {
  RowMetaInterface rowMeta = new RowMeta();
  rowMeta.addValueMeta( new ValueMetaString( "field1" ) );
  return rowMeta;
}
/**
 * Builds the nine input rows fed into the transformation, including an
 * empty string and a null to exercise the empty-field handling.
 */
public List<RowMetaAndData> createData() {
  RowMetaInterface rowMeta = createRowMetaInterface();
  Object[] values = { "abc", "ABC", "def", "def", "ghij", "zzz", "", null, "abc" };
  List<RowMetaAndData> rows = new ArrayList<RowMetaAndData>();
  for ( Object value : values ) {
    rows.add( new RowMetaAndData( rowMeta, new Object[] { value } ) );
  }
  return rows;
}
/**
 * Create the meta data for the results of test case 1: the original
 * "field1" plus the mapped "new_field".
 */
public RowMetaInterface createRowMetaInterface1() {
  RowMetaInterface rowMeta = new RowMeta();
  rowMeta.addValueMeta( new ValueMetaString( "field1" ) );
  rowMeta.addValueMeta( new ValueMetaString( "new_field" ) );
  return rowMeta;
}
/**
 * Create result data for test case 1: mapping written into a new field,
 * unmapped and empty inputs yield null in "new_field".
 */
public List<RowMetaAndData> createResultData1() {
  RowMetaInterface rowMeta = createRowMetaInterface1();
  Object[][] rows = {
    { "abc", "begin" },
    { "ABC", "BEG_LONGER" },
    { "def", "test" },
    { "def", "test" },
    { "ghij", null },
    { "zzz", null },
    { "", null },
    { null, null },
    { "abc", "begin" },
  };
  List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();
  for ( Object[] row : rows ) {
    list.add( new RowMetaAndData( rowMeta, row ) );
  }
  return list;
}
/**
 * Create result data for test case 2: mapping written back into the same
 * field; unmapped "zzz" is passed through unchanged.
 */
public List<RowMetaAndData> createResultData2() {
  RowMetaInterface rowMeta = createRowMetaInterface();
  Object[] values = { "begin", "BEG_LONGER", "test", "test", null, "zzz", null, null, "begin" };
  List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();
  for ( Object value : values ) {
    list.add( new RowMetaAndData( rowMeta, new Object[] { value } ) );
  }
  return list;
}
/**
 * Create result data for test case 3: a null source value maps empty/null
 * inputs to "emptyField".
 */
public List<RowMetaAndData> createResultData3() {
  RowMetaInterface rowMeta = createRowMetaInterface();
  Object[] values = { "begin", "BEG_LONGER", "test", "test", null, "zzz", "emptyField", "emptyField", "begin" };
  List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();
  for ( Object value : values ) {
    list.add( new RowMetaAndData( rowMeta, new Object[] { value } ) );
  }
  return list;
}
/**
 * Create result data for test case 4: new target field plus a non-matching
 * default; only the unmapped non-empty value "zzz" receives "default".
 */
public List<RowMetaAndData> createResultData4() {
  RowMetaInterface rowMeta = createRowMetaInterface1();
  Object[][] rows = {
    { "abc", "begin" },
    { "ABC", "BEG_LONGER" },
    { "def", "test" },
    { "def", "test" },
    { "ghij", null },
    { "zzz", "default" },
    { "", null },
    { null, null },
    { "abc", "begin" },
  };
  List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();
  for ( Object[] row : rows ) {
    list.add( new RowMetaAndData( rowMeta, row ) );
  }
  return list;
}
/**
 * Create result data for test case 5: in-place mapping with a non-matching
 * default; only the unmapped non-empty value "zzz" becomes "default".
 */
public List<RowMetaAndData> createResultData5() {
  RowMetaInterface rowMeta = createRowMetaInterface();
  Object[] values = { "begin", "BEG_LONGER", "test", "test", null, "default", null, null, "begin" };
  List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();
  for ( Object value : values ) {
    list.add( new RowMetaAndData( rowMeta, new Object[] { value } ) );
  }
  return list;
}
/**
 * Create result data for test case 6: non-matching default combined with a
 * null-source mapping to "emptyField" for empty/null inputs.
 */
public List<RowMetaAndData> createResultData6() {
  RowMetaInterface rowMeta = createRowMetaInterface();
  Object[] values = { "begin", "BEG_LONGER", "test", "test", "default", "default", "emptyField", "emptyField", "begin" };
  List<RowMetaAndData> list = new ArrayList<RowMetaAndData>();
  for ( Object value : values ) {
    list.add( new RowMetaAndData( rowMeta, new Object[] { value } ) );
  }
  return list;
}
/**
 * Check the 2 lists comparing the rows in order. If they are not the same fail the test.
 */
public void checkRows( List<RowMetaAndData> rows1, List<RowMetaAndData> rows2 ) {
  if ( rows1.size() != rows2.size() ) {
    fail( "Number of rows is not the same: " + rows1.size() + " and " + rows2.size() );
  }
  for ( int i = 0; i < rows1.size(); i++ ) {
    // Row numbers in failure messages are 1-based.
    int rowNr = i + 1;
    RowMetaAndData expected = rows1.get( i );
    RowMetaAndData actual = rows2.get( i );
    if ( expected.size() != actual.size() ) {
      fail( "row nr " + rowNr + " is not equal" );
    }
    Object[] expectedData = expected.getData();
    Object[] actualData = actual.getData();
    // Compare every field, in declaration order.
    int[] fieldIndexes = new int[expectedData.length];
    for ( int f = 0; f < fieldIndexes.length; f++ ) {
      fieldIndexes[f] = f;
    }
    try {
      if ( expected.getRowMeta().compare( expectedData, actualData, fieldIndexes ) != 0 ) {
        fail( "row nr " + rowNr + " is not equal" );
      }
    } catch ( KettleValueException e ) {
      // Incomparable values count as a mismatch.
      fail( "row nr " + rowNr + " is not equal" );
    }
  }
}
/**
 * Test case for valuemapper step. Injector step to a valuemapper step to a dummy step. Rows go in and should be
 * mapped accordingly.
 *
 * This test will write the mappings in a new field.
 *
 * Pipeline: injector -> dummy 1 -> valuemapper -> dummy 2; the rows collected
 * at dummy 2 are compared against createResultData1().
 */
public void testValueMapper1() throws Exception {
  KettleEnvironment.init();
  //
  // Create a new transformation...
  //
  TransMeta transMeta = new TransMeta();
  transMeta.setName( "valuemappertest1" );
  PluginRegistry registry = PluginRegistry.getInstance();
  //
  // create an injector step...
  //
  String injectorStepname = "injector step";
  InjectorMeta im = new InjectorMeta();
  // Set the information of the injector.
  String injectorPid = registry.getPluginId( StepPluginType.class, im );
  StepMeta injectorStep = new StepMeta( injectorPid, injectorStepname, im );
  transMeta.addStep( injectorStep );
  //
  // Create a dummy step 1
  //
  String dummyStepname1 = "dummy step 1";
  DummyTransMeta dm1 = new DummyTransMeta();
  String dummyPid1 = registry.getPluginId( StepPluginType.class, dm1 );
  StepMeta dummyStep1 = new StepMeta( dummyPid1, dummyStepname1, dm1 );
  transMeta.addStep( dummyStep1 );
  TransHopMeta hi = new TransHopMeta( injectorStep, dummyStep1 );
  transMeta.addTransHop( hi );
  //
  // Create a ValueMapper step
  //
  String valueMapperName = "valuemapper step";
  ValueMapperMeta vm = new ValueMapperMeta();
  // Map values of "field1" into the separate output field "new_field".
  // "ghij" maps to "", which is expected to arrive as null (see createResultData1()).
  vm.setFieldToUse( "field1" );
  vm.setTargetField( "new_field" );
  vm.setSourceValue( new String[] { "abc", "ABC", "def", "ghij" } );
  vm.setTargetValue( new String[] { "begin", "BEG_LONGER", "test", "" } );
  String valueMapperPid = registry.getPluginId( StepPluginType.class, vm );
  StepMeta valueMapperStep = new StepMeta( valueMapperPid, valueMapperName, vm );
  transMeta.addStep( valueMapperStep );
  TransHopMeta hi2 = new TransHopMeta( dummyStep1, valueMapperStep );
  transMeta.addTransHop( hi2 );
  //
  // Create a dummy step 2
  //
  String dummyStepname2 = "dummy step 2";
  DummyTransMeta dm2 = new DummyTransMeta();
  String dummyPid2 = registry.getPluginId( StepPluginType.class, dm2 );
  StepMeta dummyStep2 = new StepMeta( dummyPid2, dummyStepname2, dm2 );
  transMeta.addStep( dummyStep2 );
  TransHopMeta hi3 = new TransHopMeta( valueMapperStep, dummyStep2 );
  transMeta.addTransHop( hi3 );
  // Now execute the transformation...
  Trans trans = new Trans( transMeta );
  trans.prepareExecution( null );
  // Attach row collectors; step instances are looked up after prepareExecution().
  StepInterface si = trans.getStepInterface( dummyStepname1, 0 );
  RowStepCollector dummyRc1 = new RowStepCollector();
  si.addRowListener( dummyRc1 );
  si = trans.getStepInterface( valueMapperName, 0 );
  RowStepCollector valueMapperRc = new RowStepCollector();
  si.addRowListener( valueMapperRc );
  si = trans.getStepInterface( dummyStepname2, 0 );
  RowStepCollector dummyRc = new RowStepCollector();
  si.addRowListener( dummyRc );
  RowProducer rp = trans.addRowProducer( injectorStepname, 0 );
  trans.startThreads();
  // add rows to the running transformation via the injector
  List<RowMetaAndData> inputList = createData();
  Iterator<RowMetaAndData> it = inputList.iterator();
  while ( it.hasNext() ) {
    RowMetaAndData rm = it.next();
    rp.putRow( rm.getRowMeta(), rm.getData() );
  }
  rp.finished();
  trans.waitUntilFinished();
  // Compare the results collected at the final dummy step
  List<RowMetaAndData> resultRows = dummyRc.getRowsWritten();
  List<RowMetAndData> goldenImageRows = createResultData1();
  checkRows( goldenImageRows, resultRows );
}
/**
 * Test case for valuemapper step. Injector step to a valuemapper step to a dummy step. Rows go in and should be
 * mapped accordingly.
 *
 * This test will write the mappings in the same field.
 *
 * Pipeline: injector -> dummy 1 -> valuemapper -> dummy 2; the rows collected
 * at dummy 2 are compared against createResultData2().
 */
public void testValueMapper2() throws Exception {
  KettleEnvironment.init();
  //
  // Create a new transformation...
  //
  TransMeta transMeta = new TransMeta();
  transMeta.setName( "valuemappertest2" );
  PluginRegistry registry = PluginRegistry.getInstance();
  //
  // create an injector step...
  //
  String injectorStepname = "injector step";
  InjectorMeta im = new InjectorMeta();
  // Set the information of the injector.
  String injectorPid = registry.getPluginId( StepPluginType.class, im );
  StepMeta injectorStep = new StepMeta( injectorPid, injectorStepname, im );
  transMeta.addStep( injectorStep );
  //
  // Create a dummy step 1
  //
  String dummyStepname1 = "dummy step 1";
  DummyTransMeta dm1 = new DummyTransMeta();
  String dummyPid1 = registry.getPluginId( StepPluginType.class, dm1 );
  StepMeta dummyStep1 = new StepMeta( dummyPid1, dummyStepname1, dm1 );
  transMeta.addStep( dummyStep1 );
  TransHopMeta hi = new TransHopMeta( injectorStep, dummyStep1 );
  transMeta.addTransHop( hi );
  //
  // Create a step
  //
  String valueMapperName = "valuemapper step";
  ValueMapperMeta vm = new ValueMapperMeta();
  // Empty target field: the mapping overwrites "field1" in place.
  vm.setFieldToUse( "field1" );
  vm.setTargetField( "" );
  vm.setSourceValue( new String[] { "abc", "ABC", "def", "ghij" } );
  vm.setTargetValue( new String[] { "begin", "BEG_LONGER", "test", "" } );
  String valueMapperPid = registry.getPluginId( StepPluginType.class, vm );
  StepMeta valueMapperStep = new StepMeta( valueMapperPid, valueMapperName, vm );
  transMeta.addStep( valueMapperStep );
  TransHopMeta hi2 = new TransHopMeta( dummyStep1, valueMapperStep );
  transMeta.addTransHop( hi2 );
  //
  // Create a dummy step 2
  //
  String dummyStepname2 = "dummy step 2";
  DummyTransMeta dm2 = new DummyTransMeta();
  String dummyPid2 = registry.getPluginId( StepPluginType.class, dm2 );
  StepMeta dummyStep2 = new StepMeta( dummyPid2, dummyStepname2, dm2 );
  transMeta.addStep( dummyStep2 );
  TransHopMeta hi3 = new TransHopMeta( valueMapperStep, dummyStep2 );
  transMeta.addTransHop( hi3 );
  // Now execute the transformation...
  Trans trans = new Trans( transMeta );
  trans.prepareExecution( null );
  // Attach row collectors; step instances are looked up after prepareExecution().
  StepInterface si = trans.getStepInterface( dummyStepname1, 0 );
  RowStepCollector dummyRc1 = new RowStepCollector();
  si.addRowListener( dummyRc1 );
  si = trans.getStepInterface( valueMapperName, 0 );
  RowStepCollector valueMapperRc = new RowStepCollector();
  si.addRowListener( valueMapperRc );
  si = trans.getStepInterface( dummyStepname2, 0 );
  RowStepCollector dummyRc = new RowStepCollector();
  si.addRowListener( dummyRc );
  RowProducer rp = trans.addRowProducer( injectorStepname, 0 );
  trans.startThreads();
  // add rows to the running transformation via the injector
  List<RowMetaAndData> inputList = createData();
  Iterator<RowMetaAndData> it = inputList.iterator();
  while ( it.hasNext() ) {
    RowMetaAndData rm = it.next();
    rp.putRow( rm.getRowMeta(), rm.getData() );
  }
  rp.finished();
  trans.waitUntilFinished();
  // Compare the results collected at the final dummy step
  List<RowMetaAndData> resultRows = dummyRc.getRowsWritten();
  List<RowMetaAndData> goldenImageRows = createResultData2();
  checkRows( goldenImageRows, resultRows );
}
/**
 * Test case for valuemapper step. Injector step to a valuemapper step to a dummy step. Rows go in and should be
 * mapped accordingly.
 *
 * This test will explicitly test the empty field processing.
 *
 * Pipeline: injector -> dummy 1 -> valuemapper -> dummy 2; the rows collected
 * at dummy 2 are compared against createResultData3().
 */
public void testValueMapper3() throws Exception {
  KettleEnvironment.init();
  //
  // Create a new transformation...
  //
  TransMeta transMeta = new TransMeta();
  transMeta.setName( "valuemappertest3" );
  PluginRegistry registry = PluginRegistry.getInstance();
  //
  // create an injector step...
  //
  String injectorStepname = "injector step";
  InjectorMeta im = new InjectorMeta();
  // Set the information of the injector.
  String injectorPid = registry.getPluginId( StepPluginType.class, im );
  StepMeta injectorStep = new StepMeta( injectorPid, injectorStepname, im );
  transMeta.addStep( injectorStep );
  //
  // Create a dummy step 1
  //
  String dummyStepname1 = "dummy step 1";
  DummyTransMeta dm1 = new DummyTransMeta();
  String dummyPid1 = registry.getPluginId( StepPluginType.class, dm1 );
  StepMeta dummyStep1 = new StepMeta( dummyPid1, dummyStepname1, dm1 );
  transMeta.addStep( dummyStep1 );
  TransHopMeta hi = new TransHopMeta( injectorStep, dummyStep1 );
  transMeta.addTransHop( hi );
  //
  // Create a step
  //
  String valueMapperName = "valuemapper step";
  ValueMapperMeta vm = new ValueMapperMeta();
  // In-place mapping; the null source value maps empty/null inputs to
  // "emptyField" (see createResultData3()).
  vm.setFieldToUse( "field1" );
  vm.setTargetField( "" );
  vm.setSourceValue( new String[] { "abc", "ABC", "def", "ghij", null } );
  vm.setTargetValue( new String[] { "begin", "BEG_LONGER", "test", "", "emptyField" } );
  String valueMapperPid = registry.getPluginId( StepPluginType.class, vm );
  StepMeta valueMapperStep = new StepMeta( valueMapperPid, valueMapperName, vm );
  transMeta.addStep( valueMapperStep );
  TransHopMeta hi2 = new TransHopMeta( dummyStep1, valueMapperStep );
  transMeta.addTransHop( hi2 );
  //
  // Create a dummy step 2
  //
  String dummyStepname2 = "dummy step 2";
  DummyTransMeta dm2 = new DummyTransMeta();
  String dummyPid2 = registry.getPluginId( StepPluginType.class, dm2 );
  StepMeta dummyStep2 = new StepMeta( dummyPid2, dummyStepname2, dm2 );
  transMeta.addStep( dummyStep2 );
  TransHopMeta hi3 = new TransHopMeta( valueMapperStep, dummyStep2 );
  transMeta.addTransHop( hi3 );
  // Now execute the transformation...
  Trans trans = new Trans( transMeta );
  trans.prepareExecution( null );
  // Attach row collectors; step instances are looked up after prepareExecution().
  StepInterface si = trans.getStepInterface( dummyStepname1, 0 );
  RowStepCollector dummyRc1 = new RowStepCollector();
  si.addRowListener( dummyRc1 );
  si = trans.getStepInterface( valueMapperName, 0 );
  RowStepCollector valueMapperRc = new RowStepCollector();
  si.addRowListener( valueMapperRc );
  si = trans.getStepInterface( dummyStepname2, 0 );
  RowStepCollector dummyRc = new RowStepCollector();
  si.addRowListener( dummyRc );
  RowProducer rp = trans.addRowProducer( injectorStepname, 0 );
  trans.startThreads();
  // add rows to the running transformation via the injector
  List<RowMetaAndData> inputList = createData();
  Iterator<RowMetaAndData> it = inputList.iterator();
  while ( it.hasNext() ) {
    RowMetaAndData rm = it.next();
    rp.putRow( rm.getRowMeta(), rm.getData() );
  }
  rp.finished();
  trans.waitUntilFinished();
  // Compare the results collected at the final dummy step
  List<RowMetaAndData> resultRows = dummyRc.getRowsWritten();
  List<RowMetaAndData> goldenImageRows = createResultData3();
  checkRows( goldenImageRows, resultRows );
}
/*----------------- TODO */
/**
 * Test case for valuemapper step. Injector step to a valuemapper step to a dummy step. Rows go in and should be
 * mapped accordingly.
 *
 * This test will write the mappings in a new field, using a non matching default.
 *
 * Pipeline: injector -> dummy 1 -> valuemapper -> dummy 2; the rows collected
 * at dummy 2 are compared against createResultData4().
 */
public void testValueMapper4() throws Exception {
  KettleEnvironment.init();
  //
  // Create a new transformation...
  //
  TransMeta transMeta = new TransMeta();
  transMeta.setName( "valuemappertest4" );
  PluginRegistry registry = PluginRegistry.getInstance();
  //
  // create an injector step...
  //
  String injectorStepname = "injector step";
  InjectorMeta im = new InjectorMeta();
  // Set the information of the injector.
  String injectorPid = registry.getPluginId( StepPluginType.class, im );
  StepMeta injectorStep = new StepMeta( injectorPid, injectorStepname, im );
  transMeta.addStep( injectorStep );
  //
  // Create a dummy step 1
  //
  String dummyStepname1 = "dummy step 1";
  DummyTransMeta dm1 = new DummyTransMeta();
  String dummyPid1 = registry.getPluginId( StepPluginType.class, dm1 );
  StepMeta dummyStep1 = new StepMeta( dummyPid1, dummyStepname1, dm1 );
  transMeta.addStep( dummyStep1 );
  TransHopMeta hi = new TransHopMeta( injectorStep, dummyStep1 );
  transMeta.addTransHop( hi );
  //
  // Create a step
  //
  String valueMapperName = "valuemapper step";
  ValueMapperMeta vm = new ValueMapperMeta();
  // Map "field1" into "new_field"; unmapped non-empty values receive the
  // non-matching default "default" (see createResultData4()).
  vm.setFieldToUse( "field1" );
  vm.setTargetField( "new_field" );
  vm.setNonMatchDefault( "default" );
  vm.setSourceValue( new String[] { "abc", "ABC", "def", "ghij" } );
  vm.setTargetValue( new String[] { "begin", "BEG_LONGER", "test", "" } );
  String valueMapperPid = registry.getPluginId( StepPluginType.class, vm );
  StepMeta valueMapperStep = new StepMeta( valueMapperPid, valueMapperName, vm );
  transMeta.addStep( valueMapperStep );
  TransHopMeta hi2 = new TransHopMeta( dummyStep1, valueMapperStep );
  transMeta.addTransHop( hi2 );
  //
  // Create a dummy step 2
  //
  String dummyStepname2 = "dummy step 2";
  DummyTransMeta dm2 = new DummyTransMeta();
  String dummyPid2 = registry.getPluginId( StepPluginType.class, dm2 );
  StepMeta dummyStep2 = new StepMeta( dummyPid2, dummyStepname2, dm2 );
  transMeta.addStep( dummyStep2 );
  TransHopMeta hi3 = new TransHopMeta( valueMapperStep, dummyStep2 );
  transMeta.addTransHop( hi3 );
  // Now execute the transformation...
  Trans trans = new Trans( transMeta );
  trans.prepareExecution( null );
  // Attach row collectors; step instances are looked up after prepareExecution().
  StepInterface si = trans.getStepInterface( dummyStepname1, 0 );
  RowStepCollector dummyRc1 = new RowStepCollector();
  si.addRowListener( dummyRc1 );
  si = trans.getStepInterface( valueMapperName, 0 );
  RowStepCollector valueMapperRc = new RowStepCollector();
  si.addRowListener( valueMapperRc );
  si = trans.getStepInterface( dummyStepname2, 0 );
  RowStepCollector dummyRc = new RowStepCollector();
  si.addRowListener( dummyRc );
  RowProducer rp = trans.addRowProducer( injectorStepname, 0 );
  trans.startThreads();
  // add rows to the running transformation via the injector
  List<RowMetaAndData> inputList = createData();
  Iterator<RowMetaAndData> it = inputList.iterator();
  while ( it.hasNext() ) {
    RowMetaAndData rm = it.next();
    rp.putRow( rm.getRowMeta(), rm.getData() );
  }
  rp.finished();
  trans.waitUntilFinished();
  // Compare the results collected at the final dummy step
  List<RowMetaAndData> resultRows = dummyRc.getRowsWritten();
  List<RowMetaAndData> goldenImageRows = createResultData4();
  checkRows( goldenImageRows, resultRows );
}
/**
* Test case for valuemapper step. Injector step to a valuemapper step to a dummy step. Rows go in and should be
* mapped accordingly.
*
* This test will write the mappings in the same field. Using a non matching default.
*/
public void testValueMapper5() throws Exception {
KettleEnvironment.init();
//
// Create a new transformation...
//
TransMeta transMeta = new TransMeta();
transMeta.setName( "valuemappertest5" );
PluginRegistry registry = PluginRegistry.getInstance();
//
// create an injector step...
//
String injectorStepname = "injector step";
InjectorMeta im = new InjectorMeta();
// Set the information of the injector.
String injectorPid = registry.getPluginId( StepPluginType.class, im );
StepMeta injectorStep = new StepMeta( injectorPid, injectorStepname, im );
transMeta.addStep( injectorStep );
//
// Create a dummy step 1
//
String dummyStepname1 = "dummy step 1";
DummyTransMeta dm1 = new DummyTransMeta();
String dummyPid1 = registry.getPluginId( StepPluginType.class, dm1 );
StepMeta dummyStep1 = new StepMeta( dummyPid1, dummyStepname1, dm1 );
transMeta.addStep( dummyStep1 );
TransHopMeta hi = new TransHopMeta( injectorStep, dummyStep1 );
transMeta.addTransHop( hi );
//
// Create a step
//
String valueMapperName = "valuemapper step";
ValueMapperMeta vm = new ValueMapperMeta();
vm.setFieldToUse( "field1" );
vm.setTargetField( "" );
vm.setNonMatchDefault( "default" );
vm.setSourceValue( new String[] { "abc", "ABC", "def", "ghij" } );
vm.setTargetValue( new String[] { "begin", "BEG_LONGER", "test", "" } );
String valueMapperPid = registry.getPluginId( StepPluginType.class, vm );
StepMeta valueMapperStep = new StepMeta( valueMapperPid, valueMapperName, vm );
transMeta.addStep( valueMapperStep );
TransHopMeta hi2 = new TransHopMeta( dummyStep1, valueMapperStep );
transMeta.addTransHop( hi2 );
//
// Create a dummy step 2
//
String dummyStepname2 = "dummy step 2";
DummyTransMeta dm2 = new DummyTransMeta();
String dummyPid2 = registry.getPluginId( StepPluginType.class, dm2 );
StepMeta dummyStep2 = new StepMeta( dummyPid2, dummyStepname2, dm2 );
transMeta.addStep( dummyStep2 );
TransHopMeta hi3 = new TransHopMeta( valueMapperStep, dummyStep2 );
transMeta.addTransHop( hi3 );
// Now execute the transformation...
Trans trans = new Trans( transMeta );
trans.prepareExecution( null );
StepInterface si = trans.getStepInterface( dummyStepname1, 0 );
RowStepCollector dummyRc1 = new RowStepCollector();
si.addRowListener( dummyRc1 );
si = trans.getStepInterface( valueMapperName, 0 );
RowStepCollector valueMapperRc = new RowStepCollector();
si.addRowListener( valueMapperRc );
si = trans.getStepInterface( dummyStepname2, 0 );
RowStepCollector dummyRc = new RowStepCollector();
si.addRowListener( dummyRc );
RowProducer rp = trans.addRowProducer( injectorStepname, 0 );
trans.startThreads();
// add rows
List<RowMetaAndData> inputList = createData();
Iterator<RowMetaAndData> it = inputList.iterator();
while ( it.hasNext() ) {
RowMetaAndData rm = it.next();
rp.putRow( rm.getRowMeta(), rm.getData() );
}
rp.finished();
trans.waitUntilFinished();
// Compare the results
List<RowMetaAndData> resultRows = dummyRc.getRowsWritten();
List<RowMetaAndData> goldenImageRows = createResultData5();
checkRows( goldenImageRows, resultRows );
}
/**
* Test case for valuemapper step. Injector step to a valuemapper step to a dummy step. Rows go in and should be
* mapped accordingly.
*
* This test will explicitly test the empty field processing. using a non matching default.
*/
public void testValueMapper6() throws Exception {
KettleEnvironment.init();
//
// Create a new transformation...
//
TransMeta transMeta = new TransMeta();
transMeta.setName( "valuemappertest6" );
PluginRegistry registry = PluginRegistry.getInstance();
//
// create an injector step...
//
String injectorStepname = "injector step";
InjectorMeta im = new InjectorMeta();
// Set the information of the injector.
String injectorPid = registry.getPluginId( StepPluginType.class, im );
StepMeta injectorStep = new StepMeta( injectorPid, injectorStepname, im );
transMeta.addStep( injectorStep );
//
// Create a dummy step 1
//
String dummyStepname1 = "dummy step 1";
DummyTransMeta dm1 = new DummyTransMeta();
String dummyPid1 = registry.getPluginId( StepPluginType.class, dm1 );
StepMeta dummyStep1 = new StepMeta( dummyPid1, dummyStepname1, dm1 );
transMeta.addStep( dummyStep1 );
TransHopMeta hi = new TransHopMeta( injectorStep, dummyStep1 );
transMeta.addTransHop( hi );
//
// Create a ValueMapper step
//
String valueMapperName = "valuemapper step";
ValueMapperMeta vm = new ValueMapperMeta();
vm.setFieldToUse( "field1" );
vm.setTargetField( "" );
vm.setNonMatchDefault( "default" );
vm.setSourceValue( new String[] { "abc", "ABC", "def", "ghijk", null } );
vm.setTargetValue( new String[] { "begin", "BEG_LONGER", "test", "blah", "emptyField" } );
String valueMapperPid = registry.getPluginId( StepPluginType.class, vm );
StepMeta valueMapperStep = new StepMeta( valueMapperPid, valueMapperName, vm );
transMeta.addStep( valueMapperStep );
TransHopMeta hi2 = new TransHopMeta( dummyStep1, valueMapperStep );
transMeta.addTransHop( hi2 );
//
// Create a dummy step 2
//
String dummyStepname2 = "dummy step 2";
DummyTransMeta dm2 = new DummyTransMeta();
String dummyPid2 = registry.getPluginId( StepPluginType.class, dm2 );
StepMeta dummyStep2 = new StepMeta( dummyPid2, dummyStepname2, dm2 );
transMeta.addStep( dummyStep2 );
TransHopMeta hi3 = new TransHopMeta( valueMapperStep, dummyStep2 );
transMeta.addTransHop( hi3 );
// Now execute the transformation...
Trans trans = new Trans( transMeta );
boolean prepare;
try {
trans.prepareExecution( null );
prepare = true;
} catch ( KettleException e ) {
prepare = false;
}
assertTrue( prepare );
StepInterface si = trans.getStepInterface( dummyStepname1, 0 );
RowStepCollector dummyRc1 = new RowStepCollector();
si.addRowListener( dummyRc1 );
si = trans.getStepInterface( valueMapperName, 0 );
RowStepCollector valueMapperRc = new RowStepCollector();
si.addRowListener( valueMapperRc );
si = trans.getStepInterface( dummyStepname2, 0 );
RowStepCollector dummyRc = new RowStepCollector();
si.addRowListener( dummyRc );
RowProducer rp = trans.addRowProducer( injectorStepname, 0 );
trans.startThreads();
// add rows
List<RowMetaAndData> inputList = createData();
Iterator<RowMetaAndData> it = inputList.iterator();
while ( it.hasNext() ) {
RowMetaAndData rm = it.next();
rp.putRow( rm.getRowMeta(), rm.getData() );
}
rp.finished();
trans.waitUntilFinished();
// Compare the results
List<RowMetaAndData> resultRows = dummyRc.getRowsWritten();
List<RowMetaAndData> goldenImageRows = createResultData6();
checkRows( goldenImageRows, resultRows );
}
}
| pentaho/pentaho-kettle | integration/src/it/java/org/pentaho/di/trans/steps/valuemapper/ValueMapperIT.java |
1,439 | 404: Not Found | ActorExpose/android | cSploit/src/main/java/org/csploit/android/plugins/mitm/hijacker/HijackerWebView.java |
1,440 | // Copyright 2000-2022 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
package com.intellij.openapi.vcs.impl;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.fileEditor.FileDocumentManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.vcs.*;
import com.intellij.openapi.vcs.changes.*;
import com.intellij.openapi.vcs.diff.DiffProvider;
import com.intellij.openapi.vcs.history.VcsRevisionNumber;
import com.intellij.openapi.vcs.impl.VcsBaseContentProvider.BaseContent;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.vcsUtil.VcsImplUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
public final class LineStatusTrackerBaseContentUtil {
  private static final Logger LOG = Logger.getInstance(LineStatusTrackerBaseContentUtil.class);

  /**
   * Returns whether a base revision can be obtained for {@code file}, either from the VCS
   * that owns it or from a {@link VcsBaseContentProvider} extension.
   */
  public static boolean isSupported(@NotNull Project project, @NotNull VirtualFile file) {
    if (isHandledByVcs(project, file)) return true;
    return findProviderFor(project, file) != null;
  }

  /**
   * Returns whether the file is in a state that is tracked by the VCS
   * (i.e. its status is not ADDED, DELETED, UNKNOWN or IGNORED).
   */
  public static boolean isTracked(@NotNull Project project, @NotNull VirtualFile file) {
    FileStatus status = FileStatusManager.getInstance(project).getStatus(file);
    return status != FileStatus.ADDED &&
           status != FileStatus.DELETED &&
           status != FileStatus.UNKNOWN &&
           status != FileStatus.IGNORED;
  }

  /**
   * Computes the base (unmodified) content for {@code file}: from a provider extension if the
   * file is not under a VCS, otherwise from its local change, hijacked revision, or — for files
   * whose document is modified in memory only — the current file content.
   */
  @Nullable
  public static BaseContent getBaseRevision(@NotNull Project project, @NotNull VirtualFile file) {
    if (!isHandledByVcs(project, file)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("File is not under VCS: %s", file));
      }
      return createFromProvider(project, file);
    }

    ChangeListManager changeListManager = ChangeListManager.getInstance(project);

    Change change = changeListManager.getChange(file);
    if (change != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("Content by Change %s: %s", change, file));
      }
      return createFromLocalChange(project, change);
    }

    FileStatus status = changeListManager.getStatus(file);
    if (LOG.isDebugEnabled()) {
      LOG.debug(String.format("File status is %s: %s", status, file));
    }
    if (status == FileStatus.HIJACKED) {
      return createForHijacked(project, file);
    }
    if (status == FileStatus.NOT_CHANGED && FileDocumentManager.getInstance().isFileModified(file)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("Document is modified: %s", file));
      }
      return createForModifiedDocument(project, file);
    }
    return null;
  }

  /** Wraps a {@link ContentRevision} as a {@link BaseContent}. */
  @NotNull
  public static BaseContent createBaseContent(@NotNull Project project, @NotNull ContentRevision contentRevision) {
    return new BaseContentImpl(project, contentRevision);
  }

  // Finds the first extension-point provider claiming support for the file, if any.
  @Nullable
  private static VcsBaseContentProvider findProviderFor(@NotNull Project project, @NotNull VirtualFile file) {
    return VcsBaseContentProvider.EP_NAME.findFirstSafe(project, provider -> provider.isSupported(file));
  }

  // A file is VCS-handled when it is on the local file system and mapped to some VCS.
  private static boolean isHandledByVcs(@NotNull Project project, @NotNull VirtualFile file) {
    if (!file.isInLocalFileSystem()) return false;
    return ProjectLevelVcsManager.getInstance(project).getVcsFor(file) != null;
  }

  @Nullable
  private static BaseContent createFromProvider(@NotNull Project project, @NotNull VirtualFile file) {
    VcsBaseContentProvider provider = findProviderFor(project, file);
    if (provider == null) return null;
    if (LOG.isDebugEnabled()) {
      LOG.debug(String.format("Content by provider %s: %s", provider, file));
    }
    return provider.getBaseRevision(file);
  }

  @Nullable
  private static BaseContent createFromLocalChange(@NotNull Project project, @NotNull Change change) {
    ContentRevision beforeRevision = change.getBeforeRevision();
    return beforeRevision != null ? createBaseContent(project, beforeRevision) : null;
  }

  @Nullable
  private static BaseContent createForHijacked(@NotNull Project project, @NotNull VirtualFile file) {
    AbstractVcs vcs = ProjectLevelVcsManager.getInstance(project).getVcsFor(file);
    DiffProvider diffProvider = vcs != null ? vcs.getDiffProvider() : null;
    if (diffProvider == null) return null;

    VcsRevisionNumber currentRevision = diffProvider.getCurrentRevision(file);
    if (currentRevision == null) return null;
    return new HijackedBaseContent(project, diffProvider, file, currentRevision);
  }

  @Nullable
  private static BaseContent createForModifiedDocument(@NotNull Project project, @NotNull VirtualFile file) {
    AbstractVcs vcs = ProjectLevelVcsManager.getInstance(project).getVcsFor(file);
    if (vcs == null) return null;

    DiffProvider diffProvider = vcs.getDiffProvider();
    ChangeProvider changeProvider = vcs.getChangeProvider();
    if (diffProvider == null || changeProvider == null) return null;
    if (!changeProvider.isModifiedDocumentTrackingRequired()) return null;

    ContentRevision beforeRevision = diffProvider.createCurrentFileContent(file);
    return beforeRevision != null ? createBaseContent(project, beforeRevision) : null;
  }

  // Loads revision text, preferring the byte-backed path so charset detection can be applied.
  // Returns null (and logs at debug level) on VcsException.
  @Nullable
  private static String loadContentRevision(@Nullable Project project, @NotNull ContentRevision contentRevision) {
    try {
      if (contentRevision instanceof ByteBackedContentRevision) {
        byte[] revisionContent = ((ByteBackedContentRevision)contentRevision).getContentAsBytes();
        FilePath filePath = contentRevision.getFile();
        if (revisionContent == null) return null;
        return VcsImplUtil.loadTextFromBytes(project, revisionContent, filePath);
      }
      return contentRevision.getContent();
    }
    catch (VcsException ex) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(ex);
      }
      return null;
    }
  }

  /** Base content backed by an explicit {@link ContentRevision}. */
  private static class BaseContentImpl implements BaseContent {
    private final @NotNull Project myProject;
    private final @NotNull ContentRevision myContentRevision;

    BaseContentImpl(@NotNull Project project, @NotNull ContentRevision contentRevision) {
      myProject = project;
      myContentRevision = contentRevision;
    }

    @NotNull
    @Override
    public VcsRevisionNumber getRevisionNumber() {
      return myContentRevision.getRevisionNumber();
    }

    @Nullable
    @Override
    public String loadContent() {
      return loadContentRevision(myProject, myContentRevision);
    }
  }

  /** Base content for a hijacked file, loaded lazily through the VCS {@link DiffProvider}. */
  private static class HijackedBaseContent implements BaseContent {
    private final @Nullable Project myProject;
    private final @NotNull DiffProvider myDiffProvider;
    private final @NotNull VirtualFile myFile;
    private final @NotNull VcsRevisionNumber myRevision;

    HijackedBaseContent(@Nullable Project project,
                        @NotNull DiffProvider diffProvider,
                        @NotNull VirtualFile file,
                        @NotNull VcsRevisionNumber revision) {
      myProject = project;
      myDiffProvider = diffProvider;
      myFile = file;
      myRevision = revision;
    }

    @NotNull
    @Override
    public VcsRevisionNumber getRevisionNumber() {
      return myRevision;
    }

    @Nullable
    @Override
    public String loadContent() {
      ContentRevision contentRevision = myDiffProvider.createFileContent(myRevision, myFile);
      return contentRevision != null ? loadContentRevision(myProject, contentRevision) : null;
    }
  }
}
| Mindula-Dilthushan/intellij-community | platform/vcs-impl/src/com/intellij/openapi/vcs/impl/LineStatusTrackerBaseContentUtil.java |
1,442 | /*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* Copyright (c) 2012, Stephen Colebourne & Michael Nascimento Santos
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of JSR-310 nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package java.time.chrono;
import static java.time.temporal.ChronoField.EPOCH_DAY;
import java.io.FilePermission;
import java.io.IOException;
import java.io.InputStream;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.time.Clock;
import java.time.DateTimeException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.format.ResolverStyle;
import java.time.temporal.ChronoField;
import java.time.temporal.TemporalAccessor;
import java.time.temporal.TemporalField;
import java.time.temporal.ValueRange;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Stream;
import sun.util.logging.PlatformLogger;
/**
* The Hijrah calendar is a lunar calendar supporting Islamic calendars.
* <p>
* The HijrahChronology follows the rules of the Hijrah calendar system. The Hijrah
* calendar has several variants based on differences in when the new moon is
* determined to have occurred and where the observation is made.
* In some variants the length of each month is
* computed algorithmically from the astronomical data for the moon and earth and
* in others the length of the month is determined by an authorized sighting
* of the new moon. For the algorithmically based calendars the calendar
* can project into the future.
* For sighting based calendars only historical data from past
* sightings is available.
* <p>
* The length of each month is 29 or 30 days.
* Ordinary years have 354 days; leap years have 355 days.
*
* <p>
* CLDR and LDML identify variants:
* <table class="striped" style="text-align:left">
* <caption style="display:none">Variants of Hijrah Calendars</caption>
* <thead>
* <tr>
* <th scope="col">Chronology ID</th>
* <th scope="col">Calendar Type</th>
* <th scope="col">Locale extension, see {@link java.util.Locale}</th>
* <th scope="col">Description</th>
* </tr>
* </thead>
* <tbody>
* <tr>
* <th scope="row">Hijrah-umalqura</th>
* <td>islamic-umalqura</td>
* <td>ca-islamic-umalqura</td>
* <td>Islamic - Umm Al-Qura calendar of Saudi Arabia</td>
* </tr>
* </tbody>
* </table>
* <p>Additional variants may be available through {@link Chronology#getAvailableChronologies()}.
*
* <p>Example</p>
* <p>
* Selecting the chronology from the locale uses {@link Chronology#ofLocale}
* to find the Chronology based on Locale supported BCP 47 extension mechanism
* to request a specific calendar ("ca"). For example,
* </p>
* <pre>
* Locale locale = Locale.forLanguageTag("en-US-u-ca-islamic-umalqura");
* Chronology chrono = Chronology.ofLocale(locale);
* </pre>
*
* @implSpec
* This class is immutable and thread-safe.
*
* @implNote
* Each Hijrah variant is configured individually. Each variant is defined by a
* property resource that defines the {@code ID}, the {@code calendar type},
* the start of the calendar, the alignment with the
* ISO calendar, and the length of each month for a range of years.
* The variants are loaded by HijrahChronology as a resource from
 * hijrah-config-&lt;calendar type&gt;.properties.
* <p>
* The Hijrah property resource is a set of properties that describe the calendar.
* The syntax is defined by {@code java.util.Properties#load(Reader)}.
* <table class="striped" style="text-align:left">
* <caption style="display:none">Configuration of Hijrah Calendar</caption>
* <thead>
* <tr>
* <th scope="col">Property Name</th>
* <th scope="col">Property value</th>
* <th scope="col">Description</th>
* </tr>
* </thead>
* <tbody>
* <tr>
* <th scope="row">id</th>
* <td>Chronology Id, for example, "Hijrah-umalqura"</td>
* <td>The Id of the calendar in common usage</td>
* </tr>
* <tr>
* <th scope="row">type</th>
* <td>Calendar type, for example, "islamic-umalqura"</td>
* <td>LDML defines the calendar types</td>
* </tr>
* <tr>
* <th scope="row">version</th>
* <td>Version, for example: "1.8.0_1"</td>
* <td>The version of the Hijrah variant data</td>
* </tr>
* <tr>
* <th scope="row">iso-start</th>
* <td>ISO start date, formatted as {@code yyyy-MM-dd}, for example: "1900-04-30"</td>
* <td>The ISO date of the first day of the minimum Hijrah year.</td>
* </tr>
* <tr>
* <th scope="row">yyyy - a numeric 4 digit year, for example "1434"</th>
* <td>The value is a sequence of 12 month lengths,
* for example: "29 30 29 30 29 30 30 30 29 30 29 29"</td>
* <td>The lengths of the 12 months of the year separated by whitespace.
* A numeric year property must be present for every year without any gaps.
* The month lengths must be between 29-32 inclusive.
* </td>
* </tr>
* </tbody>
* </table>
* <p>
* Additional variants may be added by providing configuration properties files in
* {@code <JAVA_HOME>/conf/chronology} directory. The properties
* files should follow the naming convention of
* {@code hijrah-config-<chronology id>_<calendar type>.properties}.
*
* @since 1.8
*/
public final class HijrahChronology extends AbstractChronology implements Serializable {
/**
* The Hijrah Calendar id.
*/
private final transient String typeId;
/**
* The Hijrah calendarType.
*/
private final transient String calendarType;
/**
* Serialization version.
*/
@java.io.Serial
private static final long serialVersionUID = 3127340209035924785L;
/**
* Singleton instance of the Islamic Umm Al-Qura calendar of Saudi Arabia.
* Other Hijrah chronology variants may be available from
* {@link Chronology#getAvailableChronologies}.
*/
public static final HijrahChronology INSTANCE;
/**
* Flag to indicate the initialization of configuration data is complete.
* @see #checkCalendarInit()
*/
private transient volatile boolean initComplete;
/**
* Array of epoch days indexed by Hijrah Epoch month.
* Computed by {@link #loadCalendarData}.
*/
private transient int[] hijrahEpochMonthStartDays;
/**
* The minimum epoch day of this Hijrah calendar.
* Computed by {@link #loadCalendarData}.
*/
private transient int minEpochDay;
/**
* The maximum epoch day for which calendar data is available.
* Computed by {@link #loadCalendarData}.
*/
private transient int maxEpochDay;
/**
* The minimum epoch month.
* Computed by {@link #loadCalendarData}.
*/
private transient int hijrahStartEpochMonth;
/**
* The minimum length of a month.
* Computed by {@link #createEpochMonths}.
*/
private transient int minMonthLength;
/**
* The maximum length of a month.
* Computed by {@link #createEpochMonths}.
*/
private transient int maxMonthLength;
/**
* The minimum length of a year in days.
* Computed by {@link #createEpochMonths}.
*/
private transient int minYearLength;
/**
* The maximum length of a year in days.
* Computed by {@link #createEpochMonths}.
*/
private transient int maxYearLength;
/**
* Prefix of resource names for Hijrah calendar variants.
*/
private static final String RESOURCE_PREFIX = "hijrah-config-";
/**
* Suffix of resource names for Hijrah calendar variants.
*/
private static final String RESOURCE_SUFFIX = ".properties";
    /**
     * Static initialization of the built-in calendars.
     * The data is not loaded until it is used.
     */
    static {
        INSTANCE = new HijrahChronology("Hijrah-umalqura", "islamic-umalqura");
        // Register it by its aliases
        AbstractChronology.registerChrono(INSTANCE, "Hijrah");
        AbstractChronology.registerChrono(INSTANCE, "islamic");
        // custom config chronologies
        // Resolve <JAVA_HOME>/conf/chronology, the directory documented in the class
        // javadoc for user-supplied variant property files. CONF_PATH is a static field
        // declared elsewhere in this class; it is assigned before registerCustomChrono()
        // runs, which presumably reads it — confirm against the rest of the class.
        @SuppressWarnings("removal")
        String javaHome = AccessController.doPrivileged((PrivilegedAction<String>)
                () -> System.getProperty("java.home"));
        CONF_PATH = Path.of(javaHome, "conf", "chronology");
        registerCustomChrono();
    }
    /**
     * Create a HijrahChronology for the named variant and type.
     *
     * @param id the id of the calendar
     * @param calType the typeId of the calendar
     * @throws IllegalArgumentException if the id or typeId is empty
     */
    private HijrahChronology(String id, String calType) {
        // Only the identifiers are validated here; the calendar data itself is
        // loaded lazily via checkCalendarInit() on first use.
        if (id.isEmpty()) {
            throw new IllegalArgumentException("calendar id is empty");
        }
        if (calType.isEmpty()) {
            throw new IllegalArgumentException("calendar typeId is empty");
        }
        this.typeId = id;
        this.calendarType = calType;
    }
/**
* Check and ensure that the calendar data has been initialized.
* The initialization check is performed at the boundary between
* public and package methods. If a public calls another public method
* a check is not necessary in the caller.
* The constructors of HijrahDate call {@link #getEpochDay} or
* {@link #getHijrahDateInfo} so every call from HijrahDate to a
* HijrahChronology via package private methods has been checked.
*
* @throws DateTimeException if the calendar data configuration is
* malformed or IOExceptions occur loading the data
*/
private void checkCalendarInit() {
// Keep this short so it can be inlined for performance
if (initComplete == false) {
loadCalendarData();
initComplete = true;
}
}
    //-----------------------------------------------------------------------
    /**
     * Gets the ID of the chronology.
     * <p>
     * The ID uniquely identifies the {@code Chronology}. It can be used to
     * lookup the {@code Chronology} using {@link Chronology#of(String)}.
     *
     * @return the chronology ID, non-null
     * @see #getCalendarType()
     */
    @Override
    public String getId() {
        return typeId;  // final field set in the constructor; no init check needed
    }
    /**
     * Gets the calendar type of the Islamic calendar.
     * <p>
     * The calendar type is an identifier defined by the
     * <em>Unicode Locale Data Markup Language (LDML)</em> specification.
     * It can be used to lookup the {@code Chronology} using {@link Chronology#of(String)}.
     *
     * @return the calendar system type; non-null if the calendar has
     *  a standard type, otherwise null
     * @see #getId()
     */
    @Override
    public String getCalendarType() {
        return calendarType;  // final field set in the constructor; no init check needed
    }
    //-----------------------------------------------------------------------
    /**
     * Obtains a local date in Hijrah calendar system from the
     * era, year-of-era, month-of-year and day-of-month fields.
     *
     * @param era  the Hijrah era, not null
     * @param yearOfEra  the year-of-era
     * @param month  the month-of-year
     * @param dayOfMonth  the day-of-month
     * @return the Hijrah local date, not null
     * @throws DateTimeException if unable to create the date
     * @throws ClassCastException if the {@code era} is not a {@code HijrahEra}
     */
    @Override
    public HijrahDate date(Era era, int yearOfEra, int month, int dayOfMonth) {
        // prolepticYear(...) also enforces the ClassCastException for non-Hijrah eras.
        return date(prolepticYear(era, yearOfEra), month, dayOfMonth);
    }
    /**
     * Obtains a local date in Hijrah calendar system from the
     * proleptic-year, month-of-year and day-of-month fields.
     *
     * @param prolepticYear  the proleptic-year
     * @param month  the month-of-year
     * @param dayOfMonth  the day-of-month
     * @return the Hijrah local date, not null
     * @throws DateTimeException if unable to create the date
     */
    @Override
    public HijrahDate date(int prolepticYear, int month, int dayOfMonth) {
        // Delegates field validation to the HijrahDate factory.
        return HijrahDate.of(this, prolepticYear, month, dayOfMonth);
    }
    /**
     * Obtains a local date in Hijrah calendar system from the
     * era, year-of-era and day-of-year fields.
     *
     * @param era  the Hijrah era, not null
     * @param yearOfEra  the year-of-era
     * @param dayOfYear  the day-of-year
     * @return the Hijrah local date, not null
     * @throws DateTimeException if unable to create the date
     * @throws ClassCastException if the {@code era} is not a {@code HijrahEra}
     */
    @Override
    public HijrahDate dateYearDay(Era era, int yearOfEra, int dayOfYear) {
        // prolepticYear(...) also enforces the ClassCastException for non-Hijrah eras.
        return dateYearDay(prolepticYear(era, yearOfEra), dayOfYear);
    }
/**
* Obtains a local date in Hijrah calendar system from the
* proleptic-year and day-of-year fields.
*
* @param prolepticYear the proleptic-year
* @param dayOfYear the day-of-year
* @return the Hijrah local date, not null
* @throws DateTimeException if the value of the year is out of range,
* or if the day-of-year is invalid for the year
*/
@Override
public HijrahDate dateYearDay(int prolepticYear, int dayOfYear) {
HijrahDate date = HijrahDate.of(this, prolepticYear, 1, 1);
if (dayOfYear > date.lengthOfYear()) {
throw new DateTimeException("Invalid dayOfYear: " + dayOfYear);
}
return date.plusDays(dayOfYear - 1);
}
    /**
     * Obtains a local date in the Hijrah calendar system from the epoch-day.
     *
     * @param epochDay  the epoch day
     * @return the Hijrah local date, not null
     * @throws DateTimeException if unable to create the date
     */
    @Override  // override with covariant return type
    public HijrahDate dateEpochDay(long epochDay) {
        return HijrahDate.ofEpochDay(this, epochDay);
    }
    // Current date in the default time-zone.
    @Override
    public HijrahDate dateNow() {
        return dateNow(Clock.systemDefaultZone());
    }
    // Current date in the specified time-zone.
    @Override
    public HijrahDate dateNow(ZoneId zone) {
        return dateNow(Clock.system(zone));
    }
    // Current date from the specified clock; the other overloads funnel here.
    @Override
    public HijrahDate dateNow(Clock clock) {
        return date(LocalDate.now(clock));
    }
@Override
public HijrahDate date(TemporalAccessor temporal) {
if (temporal instanceof HijrahDate) {
return (HijrahDate) temporal;
}
return HijrahDate.ofEpochDay(this, temporal.getLong(EPOCH_DAY));
}
    // These overrides only narrow the generic type of the superclass result to
    // HijrahDate; the unchecked casts are safe because super delegates back to
    // this chronology's date(TemporalAccessor) factory.
    @Override
    @SuppressWarnings("unchecked")
    public ChronoLocalDateTime<HijrahDate> localDateTime(TemporalAccessor temporal) {
        return (ChronoLocalDateTime<HijrahDate>) super.localDateTime(temporal);
    }
    @Override
    @SuppressWarnings("unchecked")
    public ChronoZonedDateTime<HijrahDate> zonedDateTime(TemporalAccessor temporal) {
        return (ChronoZonedDateTime<HijrahDate>) super.zonedDateTime(temporal);
    }
    @Override
    @SuppressWarnings("unchecked")
    public ChronoZonedDateTime<HijrahDate> zonedDateTime(Instant instant, ZoneId zone) {
        return (ChronoZonedDateTime<HijrahDate>) super.zonedDateTime(instant, zone);
    }
//-----------------------------------------------------------------------
@Override
public boolean isLeapYear(long prolepticYear) {
checkCalendarInit();
if (prolepticYear < getMinimumYear() || prolepticYear > getMaximumYear()) {
return false;
}
int len = getYearLength((int) prolepticYear);
return (len > 354);
}
@Override
public int prolepticYear(Era era, int yearOfEra) {
if (!(era instanceof HijrahEra)) {
throw new ClassCastException("Era must be HijrahEra");
}
return yearOfEra;
}
/**
* Creates the HijrahEra object from the numeric value.
* The Hijrah calendar system has only one era covering the
* proleptic years greater than zero.
* This method returns the singleton HijrahEra for the value 1.
*
* @param eraValue the era value
* @return the calendar system era, not null
* @throws DateTimeException if unable to create the era
*/
@Override
public HijrahEra eraOf(int eraValue) {
return switch (eraValue) {
case 1 -> HijrahEra.AH;
default -> throw new DateTimeException("invalid Hijrah era");
};
}
    /**
     * Returns the list of eras; the Hijrah calendar has the single era AH.
     */
    @Override
    public List<Era> eras() {
        return List.of(HijrahEra.values());
    }
//-----------------------------------------------------------------------
    /**
     * Gets the range of valid values for the specified field.
     * Ranges that depend on the variant data (day-of-month, day-of-year,
     * year) are computed from the loaded calendar tables.
     */
    @Override
    public ValueRange range(ChronoField field) {
        checkCalendarInit();
        return switch (field) {
            // Smallest-maximum and largest-maximum month lengths from the table.
            case DAY_OF_MONTH -> ValueRange.of(1, 1, getMinimumMonthLength(), getMaximumMonthLength());
            case DAY_OF_YEAR -> ValueRange.of(1, getMaximumDayOfYear());
            case ALIGNED_WEEK_OF_MONTH -> ValueRange.of(1, 5);
            case YEAR, YEAR_OF_ERA -> ValueRange.of(getMinimumYear(), getMaximumYear());
            case ERA -> ValueRange.of(1, 1);
            default -> field.range();
        };
    }
//-----------------------------------------------------------------------
    /**
     * Resolves parsed field values into a Hijrah date; the superclass does
     * the work, this override only narrows the return type.
     */
    @Override // override for return type
    public HijrahDate resolveDate(Map<TemporalField, Long> fieldValues, ResolverStyle resolverStyle) {
        return (HijrahDate) super.resolveDate(fieldValues, resolverStyle);
    }
//-----------------------------------------------------------------------
/**
* Check the validity of a year.
*
* @param prolepticYear the year to check
*/
int checkValidYear(long prolepticYear) {
if (prolepticYear < getMinimumYear() || prolepticYear > getMaximumYear()) {
throw new DateTimeException("Invalid Hijrah year: " + prolepticYear);
}
return (int) prolepticYear;
}
void checkValidDayOfYear(int dayOfYear) {
if (dayOfYear < 1 || dayOfYear > getMaximumDayOfYear()) {
throw new DateTimeException("Invalid Hijrah day of year: " + dayOfYear);
}
}
void checkValidMonth(int month) {
if (month < 1 || month > 12) {
throw new DateTimeException("Invalid Hijrah month: " + month);
}
}
//-----------------------------------------------------------------------
/**
* Returns an array containing the Hijrah year, month and day
* computed from the epoch day.
*
* @param epochDay the EpochDay
* @return int[0] = YEAR, int[1] = MONTH, int[2] = DATE
*/
int[] getHijrahDateInfo(int epochDay) {
checkCalendarInit(); // ensure that the chronology is initialized
if (epochDay < minEpochDay || epochDay >= maxEpochDay) {
throw new DateTimeException("Hijrah date out of range");
}
int epochMonth = epochDayToEpochMonth(epochDay);
int year = epochMonthToYear(epochMonth);
int month = epochMonthToMonth(epochMonth);
int day1 = epochMonthToEpochDay(epochMonth);
int date = epochDay - day1; // epochDay - dayOfEpoch(year, month);
int dateInfo[] = new int[3];
dateInfo[0] = year;
dateInfo[1] = month + 1; // change to 1-based.
dateInfo[2] = date + 1; // change to 1-based.
return dateInfo;
}
    /**
     * Return the epoch day computed from Hijrah year, month, and day.
     *
     * @param prolepticYear the year to represent, 0-origin
     * @param monthOfYear the month-of-year to represent, 1-origin
     * @param dayOfMonth the day-of-month to represent, 1-origin
     * @return the epoch day
     * @throws DateTimeException if the year/month is outside the table or
     *         the day-of-month exceeds the month length
     */
    long getEpochDay(int prolepticYear, int monthOfYear, int dayOfMonth) {
        checkCalendarInit(); // ensure that the chronology is initialized
        checkValidMonth(monthOfYear);
        // Index of the requested month in the epoch-month table.
        int epochMonth = yearToEpochMonth(prolepticYear) + (monthOfYear - 1);
        if (epochMonth < 0 || epochMonth >= hijrahEpochMonthStartDays.length) {
            throw new DateTimeException("Invalid Hijrah date, year: " +
                    prolepticYear + ", month: " + monthOfYear);
        }
        // Day-of-month must fit within the actual tabulated month length.
        if (dayOfMonth < 1 || dayOfMonth > getMonthLength(prolepticYear, monthOfYear)) {
            throw new DateTimeException("Invalid Hijrah day of month: " + dayOfMonth);
        }
        return epochMonthToEpochDay(epochMonth) + (dayOfMonth - 1);
    }
    /**
     * Returns the number of days from the start of the year to the start of
     * the given month (0 for the first month). Callers add the 1-based
     * day-of-month to obtain a 1-based day-of-year.
     *
     * @param prolepticYear a proleptic year
     * @param month a month, 1-origin
     * @return days preceding the first day of the month, 0-origin
     */
    int getDayOfYear(int prolepticYear, int month) {
        return yearMonthToDayOfYear(prolepticYear, (month - 1));
    }
    /**
     * Returns month length for the year and month.
     *
     * @param prolepticYear a proleptic year
     * @param monthOfYear a month, 1-origin.
     * @return the length of the month
     * @throws DateTimeException if the year/month is outside the table
     */
    int getMonthLength(int prolepticYear, int monthOfYear) {
        // Index of the requested month in the epoch-month table.
        int epochMonth = yearToEpochMonth(prolepticYear) + (monthOfYear - 1);
        if (epochMonth < 0 || epochMonth >= hijrahEpochMonthStartDays.length) {
            throw new DateTimeException("Invalid Hijrah date, year: " +
                    prolepticYear + ", month: " + monthOfYear);
        }
        return epochMonthLength(epochMonth);
    }
    /**
     * Returns year length, computed as the distance from the start of the
     * first month of the year to the start of the first month of the next.
     * Note: The 12th month must exist in the data.
     *
     * @param prolepticYear a proleptic year
     * @return year length in days
     */
    int getYearLength(int prolepticYear) {
        return yearMonthToDayOfYear(prolepticYear, 12);
    }
    /**
     * Return the minimum supported Hijrah year.
     *
     * @return the minimum
     */
    int getMinimumYear() {
        // Epoch-month 0 is the first month of the first tabulated year.
        return epochMonthToYear(0);
    }
    /**
     * Return the maximum supported Hijrah year.
     *
     * @return the maximum
     */
    int getMaximumYear() {
        // The last table entry is the day after the final month, not a month
        // start, so the maximum fully-covered year is one less than its year.
        return epochMonthToYear(hijrahEpochMonthStartDays.length - 1) - 1;
    }
    /**
     * Returns maximum day-of-month observed anywhere in the table.
     *
     * @return maximum day-of-month
     */
    int getMaximumMonthLength() {
        return maxMonthLength;
    }
    /**
     * Returns smallest maximum day-of-month, i.e. the shortest month length
     * observed anywhere in the table.
     *
     * @return smallest maximum day-of-month
     */
    int getMinimumMonthLength() {
        return minMonthLength;
    }
    /**
     * Returns maximum day-of-year, i.e. the longest year length observed in
     * the table.
     *
     * @return maximum day-of-year
     */
    int getMaximumDayOfYear() {
        return maxYearLength;
    }
    /**
     * Returns smallest maximum day-of-year, i.e. the shortest year length
     * observed in the table.
     *
     * @return smallest maximum day-of-year
     */
    int getSmallestMaximumDayOfYear() {
        return minYearLength;
    }
    /**
     * Returns the epochMonth found by locating the epochDay in the table. The
     * epochMonth is the index in the table
     *
     * @param epochDay
     * @return The index of the element of the start of the month containing the
     * epochDay.
     */
    private int epochDayToEpochMonth(int epochDay) {
        // binary search
        int ndx = Arrays.binarySearch(hijrahEpochMonthStartDays, epochDay);
        if (ndx < 0) {
            // Not an exact month start: binarySearch returned
            // -(insertionPoint) - 1, so -ndx - 2 is the index of the month
            // start immediately below epochDay.
            ndx = -ndx - 2;
        }
        return ndx;
    }
    /**
     * Returns the year computed from the epochMonth
     *
     * @param epochMonth the epochMonth
     * @return the Hijrah Year
     */
    private int epochMonthToYear(int epochMonth) {
        // hijrahStartEpochMonth is minYear * 12, so this integer division
        // recovers the absolute year of the month.
        return (epochMonth + hijrahStartEpochMonth) / 12;
    }
    /**
     * Returns the epochMonth for the Hijrah Year.
     *
     * @param year the HijrahYear
     * @return the epochMonth for the beginning of the year.
     */
    private int yearToEpochMonth(int year) {
        // Inverse of epochMonthToYear for the first month of the year.
        return (year * 12) - hijrahStartEpochMonth;
    }
    /**
     * Returns the Hijrah month from the epochMonth.
     *
     * @param epochMonth the epochMonth
     * @return the month of the Hijrah Year, 0-based
     */
    private int epochMonthToMonth(int epochMonth) {
        return (epochMonth + hijrahStartEpochMonth) % 12;
    }
    /**
     * Returns the epochDay for the start of the epochMonth.
     *
     * @param epochMonth the epochMonth; assumed to be a valid index
     * @return the epochDay for the start of the epochMonth.
     */
    private int epochMonthToEpochDay(int epochMonth) {
        return hijrahEpochMonthStartDays[epochMonth];
    }
    /**
     * Returns the day of year for the requested HijrahYear and month.
     * With month == 0 this is 0; with month == 12 it is the year length.
     *
     * @param prolepticYear the Hijrah year
     * @param month the Hijrah month, 0-based
     * @return the day of year for the start of the month of the year
     */
    private int yearMonthToDayOfYear(int prolepticYear, int month) {
        int epochMonthFirst = yearToEpochMonth(prolepticYear);
        // Distance from the start of the year to the start of the month.
        return epochMonthToEpochDay(epochMonthFirst + month)
                - epochMonthToEpochDay(epochMonthFirst);
    }
    /**
     * Returns the length of the epochMonth. It is computed from the start of
     * the following month minus the start of the requested month.
     *
     * @param epochMonth the epochMonth; assumed to be within range
     * @return the length in days of the epochMonth
     */
    private int epochMonthLength(int epochMonth) {
        // The very last entry in the epochMonth table is not the start of a month
        return hijrahEpochMonthStartDays[epochMonth + 1]
                - hijrahEpochMonthStartDays[epochMonth];
    }
//-----------------------------------------------------------------------
    // Well-known keys in the variant properties file; everything else is a
    // numeric year mapped to its 12 month lengths (see loadCalendarData).
    private static final String KEY_ID = "id";               // chronology id
    private static final String KEY_TYPE = "type";           // calendar type
    private static final String KEY_VERSION = "version";     // data version (ignored)
    private static final String KEY_ISO_START = "iso-start"; // ISO date of Hijrah epoch start
    // Directory for custom variant files; initialized elsewhere in a static block.
    private static final Path CONF_PATH;
    /**
     * Return the configuration properties from the resource.
     * <p>
     * The location of the variant configuration resource is:
     * <pre>
     *   "/java/time/chrono/" (for "islamic-umalqura" type), or
     *   "&lt;JAVA_HOME&gt;/conf/chronology/" +
     *   "hijrah-config-" + chronologyId + "_" + calendarType + ".properties"
     * </pre>
     *
     * @param chronologyId the chronology ID of the calendar variant
     * @param calendarType the calendarType of the calendar variant
     * @return a Properties containing the properties read from the resource.
     * @throws Exception if access to the property resource fails
     */
    private static Properties readConfigProperties(final String chronologyId, final String calendarType) throws Exception {
        String resourceName = RESOURCE_PREFIX + chronologyId + "_" + calendarType + RESOURCE_SUFFIX;
        // The built-in umalqura variant ships inside the java.base module;
        // any other variant is loaded from the conf/chronology directory.
        PrivilegedAction<InputStream> getResourceAction =  calendarType.equals("islamic-umalqura") ?
            () -> HijrahChronology.class.getResourceAsStream(resourceName) :
            () -> {
                try {
                    // Checked IOException cannot escape a PrivilegedAction;
                    // wrap it so the caller's catch still sees the cause.
                    return Files.newInputStream(CONF_PATH.resolve(resourceName),
                            StandardOpenOption.READ);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            };
        FilePermission perm1 = new FilePermission("<<ALL FILES>>", "read");
        RuntimePermission perm2 = new RuntimePermission("accessSystemModules");
        try (@SuppressWarnings("removal") InputStream is = AccessController.doPrivileged(getResourceAction, null, perm1, perm2)) {
            if (is == null) {
                throw new RuntimeException("Hijrah calendar resource not found: " + resourceName);
            }
            Properties props = new Properties();
            props.load(is);
            return props;
        }
    }
    /**
     * Loads and processes the Hijrah calendar properties file for this calendarType.
     * The starting Hijrah date and the corresponding ISO date are
     * extracted and used to calculate the epochDate offset.
     * The version number is identified and ignored.
     * Everything else is the data for a year containing the length of each
     * of 12 months.
     *
     * @throws DateTimeException if initialization of the calendar data from the
     *     resource fails
     */
    private void loadCalendarData() {
        try {
            Properties props = readConfigProperties(typeId, calendarType);
            Map<Integer, int[]> years = new HashMap<>();
            int minYear = Integer.MAX_VALUE;
            int maxYear = Integer.MIN_VALUE;
            String id = null;
            String type = null;
            String version = null;
            int isoStart = 0;
            // Each property is either one of the well-known metadata keys or
            // a numeric Hijrah year mapped to its 12 month lengths.
            for (Map.Entry<Object, Object> entry : props.entrySet()) {
                String key = (String) entry.getKey();
                switch (key) {
                    case KEY_ID:
                        id = (String)entry.getValue();
                        break;
                    case KEY_TYPE:
                        type = (String)entry.getValue();
                        break;
                    case KEY_VERSION:
                        version = (String)entry.getValue();
                        break;
                    case KEY_ISO_START: {
                        // ISO date of the first tabulated Hijrah day, stored
                        // as an epoch-day offset.
                        int[] ymd = parseYMD((String) entry.getValue());
                        isoStart = (int) LocalDate.of(ymd[0], ymd[1], ymd[2]).toEpochDay();
                        break;
                    }
                    default:
                        try {
                            // Everything else is either a year or invalid
                            int year = Integer.parseInt(key);
                            int[] months = parseMonths((String) entry.getValue());
                            years.put(year, months);
                            maxYear = Math.max(maxYear, year);
                            minYear = Math.min(minYear, year);
                        } catch (NumberFormatException nfe) {
                            throw new IllegalArgumentException("bad key: " + key);
                        }
                }
            }
            // All four metadata entries are mandatory and must match this
            // chronology instance.
            if (!getId().equals(id)) {
                throw new IllegalArgumentException("Configuration is for a different calendar: " + id);
            }
            if (!getCalendarType().equals(type)) {
                throw new IllegalArgumentException("Configuration is for a different calendar type: " + type);
            }
            if (version == null || version.isEmpty()) {
                throw new IllegalArgumentException("Configuration does not contain a version");
            }
            if (isoStart == 0) {
                throw new IllegalArgumentException("Configuration does not contain a ISO start date");
            }
            // Now create and validate the array of epochDays indexed by epochMonth
            hijrahStartEpochMonth = minYear * 12;
            minEpochDay = isoStart;
            hijrahEpochMonthStartDays = createEpochMonths(minEpochDay, minYear, maxYear, years);
            maxEpochDay = hijrahEpochMonthStartDays[hijrahEpochMonthStartDays.length - 1];
            // Compute the min and max year length in days.
            // NOTE(review): this loop uses "< maxYear", excluding the final
            // year from the min/max stats although its data is in the table —
            // confirm whether that is intentional.
            for (int year = minYear; year < maxYear; year++) {
                int length = getYearLength(year);
                minYearLength = Math.min(minYearLength, length);
                maxYearLength = Math.max(maxYearLength, length);
            }
        } catch (Exception ex) {
            // Log error and throw a DateTimeException
            PlatformLogger logger = PlatformLogger.getLogger("java.time.chrono");
            logger.severe("Unable to initialize Hijrah calendar proxy: " + typeId, ex);
            throw new DateTimeException("Unable to initialize HijrahCalendar: " + typeId, ex);
        }
    }
/**
* Converts the map of year to month lengths ranging from minYear to maxYear
* into a linear contiguous array of epochDays. The index is the hijrahMonth
* computed from year and month and offset by minYear. The value of each
* entry is the epochDay corresponding to the first day of the month.
*
* @param minYear The minimum year for which data is provided
* @param maxYear The maximum year for which data is provided
* @param years a Map of year to the array of 12 month lengths
* @return array of epochDays for each month from min to max
*/
private int[] createEpochMonths(int epochDay, int minYear, int maxYear, Map<Integer, int[]> years) {
// Compute the size for the array of dates
int numMonths = (maxYear - minYear + 1) * 12 + 1;
// Initialize the running epochDay as the corresponding ISO Epoch day
int epochMonth = 0; // index into array of epochMonths
int[] epochMonths = new int[numMonths];
minMonthLength = Integer.MAX_VALUE;
maxMonthLength = Integer.MIN_VALUE;
// Only whole years are valid, any zero's in the array are illegal
for (int year = minYear; year <= maxYear; year++) {
int[] months = years.get(year);// must not be gaps
for (int month = 0; month < 12; month++) {
int length = months[month];
epochMonths[epochMonth++] = epochDay;
if (length < 29 || length > 32) {
throw new IllegalArgumentException("Invalid month length in year: " + minYear);
}
epochDay += length;
minMonthLength = Math.min(minMonthLength, length);
maxMonthLength = Math.max(maxMonthLength, length);
}
}
// Insert the final epochDay
epochMonths[epochMonth++] = epochDay;
if (epochMonth != epochMonths.length) {
throw new IllegalStateException("Did not fill epochMonths exactly: ndx = " + epochMonth
+ " should be " + epochMonths.length);
}
return epochMonths;
}
/**
* Parses the 12 months lengths from a property value for a specific year.
*
* @param line the value of a year property
* @return an array of int[12] containing the 12 month lengths
* @throws IllegalArgumentException if the number of months is not 12
* @throws NumberFormatException if the 12 tokens are not numbers
*/
private int[] parseMonths(String line) {
int[] months = new int[12];
String[] numbers = line.split("\\s");
if (numbers.length != 12) {
throw new IllegalArgumentException("wrong number of months on line: " + Arrays.toString(numbers) + "; count: " + numbers.length);
}
for (int i = 0; i < 12; i++) {
try {
months[i] = Integer.parseInt(numbers[i]);
} catch (NumberFormatException nfe) {
throw new IllegalArgumentException("bad key: " + numbers[i]);
}
}
return months;
}
/**
* Parse yyyy-MM-dd into a 3 element array [yyyy, mm, dd].
*
* @param string the input string
* @return the 3 element array with year, month, day
*/
private int[] parseYMD(String string) {
// yyyy-MM-dd
string = string.trim();
try {
if (string.charAt(4) != '-' || string.charAt(7) != '-') {
throw new IllegalArgumentException("date must be yyyy-MM-dd");
}
int[] ymd = new int[3];
ymd[0] = Integer.parseInt(string, 0, 4, 10);
ymd[1] = Integer.parseInt(string, 5, 7, 10);
ymd[2] = Integer.parseInt(string, 8, 10, 10);
return ymd;
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("date must be yyyy-MM-dd", ex);
}
}
    /**
     * Look for Hijrah chronology variant properties files in
     * <JAVA_HOME>/conf/chronology directory. Then register its chronology, if any.
     */
    @SuppressWarnings("removal")
    private static void registerCustomChrono() {
        AccessController.doPrivileged(
            (PrivilegedAction<Void>)() -> {
                if (Files.isDirectory(CONF_PATH)) {
                    try (Stream<Path> stream = Files.list(CONF_PATH)) {
                        // File names look like "hijrah-config-<id>_<type>.properties";
                        // strip the prefix/suffix, then split id and type on '_'.
                        stream.map(p -> p.getFileName().toString())
                            .filter(fn -> fn.matches("hijrah-config-[^\\.]+\\.properties"))
                            .map(fn -> fn.replaceAll("(hijrah-config-|\\.properties)", ""))
                            .forEach(idtype -> {
                                int delimiterPos = idtype.indexOf('_');
                                // '_' should be somewhere in the middle of idtype
                                if (delimiterPos > 1 && delimiterPos < idtype.length() - 1) {
                                    AbstractChronology.registerChrono(
                                        new HijrahChronology(
                                                idtype.substring(0, delimiterPos),
                                                idtype.substring(delimiterPos + 1)));
                                } else {
                                    // Malformed name: log and skip rather than fail startup.
                                    PlatformLogger.getLogger("java.time.chrono")
                                        .warning("Hijrah custom config init failed." +
                                                "'<id>_<type>' name convention not followed: " + idtype);
                                }
                            });
                    } catch (IOException e) {
                        PlatformLogger.getLogger("java.time.chrono")
                            .warning("Hijrah custom config init failed.", e);
                    }
                }
                return null;
            },
            null,
            new FilePermission("<<ALL FILES>>", "read"));
    }
//-----------------------------------------------------------------------
    /**
     * Writes the Chronology using a
     * <a href="{@docRoot}/serialized-form.html#java.time.chrono.Ser">dedicated serialized form</a>.
     * @serialData
     * <pre>
     *  out.writeByte(1);     // identifies a Chronology
     *  out.writeUTF(getId());
     * </pre>
     *
     * @return the instance of {@code Ser}, not null
     */
    @Override
    @java.io.Serial
    Object writeReplace() {
        return super.writeReplace();
    }
    /**
     * Defend against malicious streams.
     * Deserialization must always go through the {@code Ser} delegate
     * installed by {@code writeReplace}.
     *
     * @param s the stream to read
     * @throws InvalidObjectException always
     */
    @java.io.Serial
    private void readObject(ObjectInputStream s) throws InvalidObjectException {
        throw new InvalidObjectException("Deserialization via serialization delegate");
    }
}
| openjdk/jdk | src/java.base/share/classes/java/time/chrono/HijrahChronology.java |
1,443 | /* Copyright (C) 2017 Brian P. Hinz
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
package com.tigervnc.vncviewer;
import java.awt.*;
import java.awt.event.*;
import java.util.*;
import javax.swing.*;
import com.tigervnc.rfb.*;
import static java.awt.event.KeyEvent.*;
import static com.tigervnc.rfb.Keysymdef.*;
/**
 * Maps AWT {@link KeyEvent}s to X11 keysyms for transmission to the VNC
 * server. Special keys are resolved through a static lookup table keyed by
 * virtual key code and key location; printable keys are resolved from the
 * event's Unicode character, with workarounds for CTRL/ALT combinations that
 * do not produce a usable character.
 */
public class KeyMap {

  public final static int NoSymbol = 0;

  // Fallback mapping from Java key codes to their base ASCII characters,
  // used when a KeyStroke fails to produce a valid character.
  private static final HashMap<Integer, Character> code_map_java_to_char;
  static {
    // Certain KeyStrokes fail to produce a valid character (CTRL+ALT+...
    // on Windows and Mac almost never does). For these cases, the best
    // we can try to do is to map to the ASCII symbol from the keyCode.
    code_map_java_to_char = new HashMap<Integer, Character>();
    for (int c=32; c<0x7f; c++) {
      int keyCode = KeyEvent.getExtendedKeyCodeForChar(c);
      if (keyCode != KeyEvent.VK_UNDEFINED)
        // Not all ASCII characters have VK_ constants, see vk_to_ascii()
        code_map_java_to_char.put(keyCode, (char)c);
    }
  }

  // Each row maps a virtual key code to one keysym per key location
  // (index = KeyEvent.getKeyLocation() + 1).
  private static int[][] vkey_map = {
    /* KEYCODE                                       LOCATION                                      */
    /*                          UNKNOWN       STANDARD             LEFT          RIGHT         NUMPAD */
    { VK_BACK_SPACE,            NoSymbol,     XK_BackSpace,        NoSymbol,     NoSymbol,     NoSymbol },
    { VK_TAB,                   NoSymbol,     XK_Tab,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_CANCEL,                NoSymbol,     XK_Cancel,           NoSymbol,     NoSymbol,     NoSymbol },
    { VK_ENTER,                 NoSymbol,     XK_Return,           NoSymbol,     NoSymbol,     XK_KP_Enter },
    { VK_SHIFT,                 NoSymbol,     XK_Shift_L,          XK_Shift_L,   XK_Shift_R,   NoSymbol },
    { VK_CONTROL,               NoSymbol,     XK_Control_L,        XK_Control_L, XK_Control_R, NoSymbol },
    { VK_ALT,                   NoSymbol,     XK_Alt_L,            XK_Alt_L,     XK_Alt_R,     NoSymbol },
    /* VK_PAUSE left out on purpose because interpretation depends on state of CTRL. See further down. */
    { VK_CAPS_LOCK,             NoSymbol,     XK_Caps_Lock,        NoSymbol,     NoSymbol,     NoSymbol },
    { VK_ESCAPE,                NoSymbol,     XK_Escape,           NoSymbol,     NoSymbol,     NoSymbol },
    { VK_END,                   NoSymbol,     XK_End,              NoSymbol,     NoSymbol,     XK_KP_End },
    { VK_HOME,                  NoSymbol,     XK_Home,             NoSymbol,     NoSymbol,     XK_KP_Home },
    { VK_LEFT,                  NoSymbol,     XK_Left,             NoSymbol,     NoSymbol,     XK_KP_Left },
    { VK_UP,                    NoSymbol,     XK_Up,               NoSymbol,     NoSymbol,     XK_KP_Up },
    { VK_RIGHT,                 NoSymbol,     XK_Right,            NoSymbol,     NoSymbol,     XK_KP_Right },
    { VK_DOWN,                  NoSymbol,     XK_Down,             NoSymbol,     NoSymbol,     XK_KP_Down },
    /* VK_PRINTSCREEN left out on purpose because interpretation depends on state of CTRL. See further down. */
    { VK_PAGE_UP,               NoSymbol,     XK_Page_Up,          NoSymbol,     NoSymbol,     XK_KP_Page_Up },
    { VK_PAGE_DOWN,             NoSymbol,     XK_Page_Down,        NoSymbol,     NoSymbol,     XK_KP_Page_Down },
    { VK_BEGIN,                 NoSymbol,     XK_Begin,            NoSymbol,     NoSymbol,     XK_KP_Begin },
    { VK_KP_LEFT,               NoSymbol,     XK_KP_Left,          NoSymbol,     NoSymbol,     XK_KP_Left },
    { VK_KP_UP,                 NoSymbol,     XK_KP_Up,            NoSymbol,     NoSymbol,     XK_KP_Up },
    { VK_KP_RIGHT,              NoSymbol,     XK_KP_Right,         NoSymbol,     NoSymbol,     XK_KP_Right },
    { VK_KP_DOWN,               NoSymbol,     XK_KP_Down,          NoSymbol,     NoSymbol,     XK_KP_Down },
    { VK_INSERT,                NoSymbol,     XK_Insert,           NoSymbol,     NoSymbol,     XK_KP_Insert },
    { VK_DELETE,                NoSymbol,     XK_Delete,           NoSymbol,     NoSymbol,     XK_KP_Delete },
    { VK_WINDOWS,               NoSymbol,     NoSymbol,            XK_Super_L,   XK_Super_R,   NoSymbol },
    { VK_CONTEXT_MENU,          NoSymbol,     XK_Menu,             NoSymbol,     NoSymbol,     NoSymbol },
    { VK_NUMPAD0,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_0 },
    { VK_NUMPAD1,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_1 },
    { VK_NUMPAD2,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_2 },
    { VK_NUMPAD3,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_3 },
    { VK_NUMPAD4,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_4 },
    { VK_NUMPAD5,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_5 },
    { VK_NUMPAD6,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_6 },
    { VK_NUMPAD7,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_7 },
    { VK_NUMPAD8,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_8 },
    { VK_NUMPAD9,               NoSymbol,     NoSymbol,            NoSymbol,     NoSymbol,     XK_KP_9 },
    { VK_MULTIPLY,              NoSymbol,     XK_KP_Multiply,      NoSymbol,     NoSymbol,     XK_KP_Multiply },
    { VK_ADD,                   NoSymbol,     XK_KP_Add,           NoSymbol,     NoSymbol,     XK_KP_Add },
    { VK_SUBTRACT,              NoSymbol,     XK_KP_Subtract,      NoSymbol,     NoSymbol,     XK_KP_Subtract },
    { VK_DIVIDE,                NoSymbol,     XK_KP_Divide,        NoSymbol,     NoSymbol,     XK_KP_Divide },
    { VK_SEPARATER,             NoSymbol,     XK_KP_Separator,     NoSymbol,     NoSymbol,     XK_KP_Separator },
    { VK_DECIMAL,               NoSymbol,     XK_KP_Decimal,       NoSymbol,     NoSymbol,     XK_KP_Decimal },
    { VK_F1,                    NoSymbol,     XK_F1,               XK_L1,        XK_R1,        NoSymbol },
    { VK_F2,                    NoSymbol,     XK_F2,               XK_L2,        XK_R2,        NoSymbol },
    { VK_F3,                    NoSymbol,     XK_F3,               XK_L3,        XK_R3,        NoSymbol },
    { VK_F4,                    NoSymbol,     XK_F4,               XK_L4,        XK_R4,        NoSymbol },
    { VK_F5,                    NoSymbol,     XK_F5,               XK_L5,        XK_R5,        NoSymbol },
    { VK_F6,                    NoSymbol,     XK_F6,               XK_L6,        XK_R6,        NoSymbol },
    { VK_F7,                    NoSymbol,     XK_F7,               XK_L7,        XK_R7,        NoSymbol },
    { VK_F8,                    NoSymbol,     XK_F8,               XK_L8,        XK_R8,        NoSymbol },
    { VK_F9,                    NoSymbol,     XK_F9,               XK_L9,        XK_R9,        NoSymbol },
    { VK_F10,                   NoSymbol,     XK_F10,              XK_L10,       XK_R10,       NoSymbol },
    { VK_F11,                   NoSymbol,     XK_F11,              NoSymbol,     XK_R11,       NoSymbol },
    { VK_F12,                   NoSymbol,     XK_F12,              NoSymbol,     XK_R12,       NoSymbol },
    { VK_F13,                   NoSymbol,     XK_F13,              NoSymbol,     XK_R13,       NoSymbol },
    { VK_F14,                   NoSymbol,     XK_F14,              NoSymbol,     XK_R14,       NoSymbol },
    { VK_F15,                   NoSymbol,     XK_F15,              NoSymbol,     XK_R15,       NoSymbol },
    { VK_F16,                   NoSymbol,     XK_F16,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_F17,                   NoSymbol,     XK_F17,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_F18,                   NoSymbol,     XK_F18,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_F19,                   NoSymbol,     XK_F19,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_F20,                   NoSymbol,     XK_F20,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_F21,                   NoSymbol,     XK_F21,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_F22,                   NoSymbol,     XK_F22,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_F23,                   NoSymbol,     XK_F23,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_F24,                   NoSymbol,     XK_F24,              NoSymbol,     NoSymbol,     NoSymbol },
    { VK_NUM_LOCK,              NoSymbol,     XK_Num_Lock,         NoSymbol,     NoSymbol,     XK_Num_Lock },
    { VK_SCROLL_LOCK,           NoSymbol,     XK_Scroll_Lock,      NoSymbol,     NoSymbol,     NoSymbol },
    { VK_ALT_GRAPH,             NoSymbol,     XK_ISO_Level3_Shift, NoSymbol,     NoSymbol,     NoSymbol },
    { VK_META,                  NoSymbol,     NoSymbol,            XK_Meta_L,    XK_Meta_R,    NoSymbol },
    { VK_MODECHANGE,            NoSymbol,     XK_Mode_switch,      NoSymbol,     NoSymbol,     NoSymbol },
    { VK_CLEAR,                 NoSymbol,     XK_Clear,            NoSymbol,     NoSymbol,     XK_KP_Begin },
    { VK_AGAIN,                 NoSymbol,     XK_Redo,             NoSymbol,     NoSymbol,     NoSymbol },
    { VK_UNDO,                  NoSymbol,     XK_Undo,             NoSymbol,     NoSymbol,     NoSymbol },
    { VK_FIND,                  NoSymbol,     XK_Find,             NoSymbol,     NoSymbol,     NoSymbol },
    { VK_STOP,                  NoSymbol,     XK_Cancel,           NoSymbol,     NoSymbol,     NoSymbol },
    { VK_HELP,                  NoSymbol,     XK_Help,             NoSymbol,     NoSymbol,     NoSymbol },
    { VK_KANJI,                 NoSymbol,     XK_Kanji,            NoSymbol,     NoSymbol,     NoSymbol },
    { VK_KATAKANA,              NoSymbol,     XK_Katakana,         NoSymbol,     NoSymbol,     NoSymbol },
    { VK_HIRAGANA,              NoSymbol,     XK_Hiragana,         NoSymbol,     NoSymbol,     NoSymbol },
    { VK_PREVIOUS_CANDIDATE,    NoSymbol,     XK_PreviousCandidate, NoSymbol,    NoSymbol,     NoSymbol },
    { VK_CODE_INPUT,            NoSymbol,     XK_Codeinput,        NoSymbol,     NoSymbol,     NoSymbol },
    { VK_JAPANESE_ROMAN,        NoSymbol,     XK_Romaji,           NoSymbol,     NoSymbol,     NoSymbol },
    { VK_KANA_LOCK,             NoSymbol,     XK_Kana_Lock,        NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_ABOVEDOT,         NoSymbol,     XK_dead_abovedot,    NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_ABOVERING,        NoSymbol,     XK_dead_abovering,   NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_ACUTE,            NoSymbol,     XK_dead_acute,       NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_BREVE,            NoSymbol,     XK_dead_breve,       NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_CARON,            NoSymbol,     XK_dead_caron,       NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_CEDILLA,          NoSymbol,     XK_dead_cedilla,     NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_CIRCUMFLEX,       NoSymbol,     XK_dead_circumflex,  NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_DIAERESIS,        NoSymbol,     XK_dead_diaeresis,   NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_DOUBLEACUTE,      NoSymbol,     XK_dead_doubleacute, NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_GRAVE,            NoSymbol,     XK_dead_grave,       NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_IOTA,             NoSymbol,     XK_dead_iota,        NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_MACRON,           NoSymbol,     XK_dead_macron,      NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_OGONEK,           NoSymbol,     XK_dead_ogonek,      NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_SEMIVOICED_SOUND, NoSymbol,     XK_dead_semivoiced_sound, NoSymbol, NoSymbol,    NoSymbol },
    { VK_DEAD_TILDE,            NoSymbol,     XK_dead_tilde,       NoSymbol,     NoSymbol,     NoSymbol },
    { VK_DEAD_VOICED_SOUND,     NoSymbol,     XK_dead_voiced_sound, NoSymbol,    NoSymbol,     NoSymbol },
    { VK_ALPHANUMERIC,          NoSymbol,     XK_Eisu_Shift,       NoSymbol,     NoSymbol,     NoSymbol },
    { VK_ALL_CANDIDATES,        NoSymbol,     XK_MultipleCandidate, NoSymbol,    NoSymbol,     NoSymbol },
    { VK_KANA,                  NoSymbol,     XK_Kana_Shift,       NoSymbol,     NoSymbol,     NoSymbol },
    { VK_JAPANESE_KATAKANA,     NoSymbol,     XK_Katakana,         NoSymbol,     NoSymbol,     NoSymbol },
    { VK_JAPANESE_HIRAGANA,     NoSymbol,     XK_Hiragana,         NoSymbol,     NoSymbol,     NoSymbol },
    { VK_COMPOSE,               NoSymbol,     XK_Multi_key,        NoSymbol,     NoSymbol,     NoSymbol },
  };

  /**
   * Translates a key event into an X11 keysym, or {@code NoSymbol} when no
   * sensible mapping exists.
   *
   * @param ev the key event to translate
   * @return the keysym, or {@code NoSymbol}
   */
  public static int vkey_to_keysym(KeyEvent ev)
  {
    int keyCode = get_keycode_fallback_extended(ev);

    // Start with keys that either don't generate a symbol, or
    // generate the same symbol as some other key.
    if (keyCode == KeyEvent.VK_PAUSE)
      return (ev.isControlDown() ? XK_Break : XK_Pause);
    else if (keyCode == KeyEvent.VK_PRINTSCREEN)
      return (ev.isControlDown() ? XK_Sys_Req : XK_Print);
    else
      for(int i = 0; i < vkey_map.length; i++)
        if (keyCode == vkey_map[i][0])
          return vkey_map[i][ev.getKeyLocation()+1];

    // Unknown special key?
    if (KeyEvent.getKeyText(keyCode).isEmpty()) {
      vlog.error("Unknown key code: 0x%04x", keyCode);
      return NoSymbol;
    }

    // Pressing Ctrl wreaks havoc with the symbol lookup...
    int ucs = (int)ev.getKeyChar();
    if (ev.isControlDown()) {
      // For CTRL-<letter>, CTRL is sent separately, so just send <letter>.
      if ((ucs >= 1 && ucs <= 26 && !ev.isShiftDown()) ||
          // CTRL-{, CTRL-|, CTRL-} also map to ASCII 96-127
          (ucs >= 27 && ucs <= 29 && ev.isShiftDown()))
        ucs += 96;
      // For CTRL-SHIFT-<letter>, send capital <letter> to emulate behavior
      // of Linux. For CTRL-@, send @. For CTRL-_, send _. For CTRL-^,
      // send ^.
      else if (ucs < 32)
        ucs += 64;
      // If it's still undefined, map the keyCode to ASCII symbol
      else if (keyCode >= 0 && keyCode <= 127)
        if (ucs == CHAR_UNDEFINED || ev.isAltDown())
          ucs = vk_to_ascii(keyCode, ev.isShiftDown());
        else if (VncViewer.os.startsWith("mac os x") && ev.isMetaDown())
          // Alt on OS X behaves more like AltGr on other systems, and to get
          // sane behaviour we should translate things in that manner for the
          // remote VNC server. However that means we lose the ability to use
          // Alt as a shortcut modifier. Do what RealVNC does and hijack the
          // left command key as an Alt replacement.
          ucs = vk_to_ascii(keyCode, ev.isShiftDown());
    }

    // Dead keys are represented by their spacing equivalent
    // (or something similar depending on the layout)
    if (Character.getType(ucs) == Character.COMBINING_SPACING_MARK)
      return Keysym2ucs.ucs2keysym(Keysym2ucs.ucs2combining(ucs));

    if (Character.isDefined(ucs))
      return Keysym2ucs.ucs2keysym(ucs);

    return NoSymbol;
  }

  /**
   * Returns the event's key code, falling back to the extended key code when
   * the plain one is undefined (0).
   */
  public static int get_keycode_fallback_extended(final KeyEvent ev) {
    final int keyCode = ev.getKeyCode();
    return (keyCode == 0) ? ev.getExtendedKeyCode() : keyCode;
  }

  /**
   * Maps a virtual key code to its ASCII character, applying shift where it
   * matters. Returns 0 when no mapping is known.
   */
  private static int vk_to_ascii(int vk, boolean shift) {
    // Single lookup instead of the previous containsKey()+get() pair.
    char c = code_map_java_to_char.getOrDefault(vk, (char)0);
    // 0x25 (%) and 0x3F (?) do not have VK_ constants
    if (vk == VK_5)
      c = shift ? '%' : c;
    else if (vk == VK_SLASH)
      c = shift ? '?' : c;
    if (Character.isLetter(c))
      c = shift ? Character.toUpperCase(c) : Character.toLowerCase(c);
    return (int)c;
  }

  static LogWriter vlog = new LogWriter("KeyMap");
}
| TigerVNC/tigervnc | java/com/tigervnc/vncviewer/KeyMap.java |
1,444 | package org.bouncycastle.est;
import java.net.URL;
import org.bouncycastle.util.Arrays;
/**
 * Builder for basic EST requests
 */
public class ESTRequestBuilder
{
    private final String method;
    private URL url;

    // Headers are deep-cloned from any source request so that edits made
    // through this builder do not mutate the original request.
    private HttpUtil.Headers headers;
    ESTHijacker hijacker;
    ESTSourceConnectionListener listener;
    ESTClient client;
    private byte[] data;

    /**
     * Creates a builder pre-populated from an existing request.
     * NOTE(review): {@code data} is taken by reference here while
     * {@link #withData(byte[])} clones its argument — confirm intentional.
     *
     * @param request the request to copy from
     */
    public ESTRequestBuilder(ESTRequest request)
    {
        this.method = request.method;
        this.url = request.url;
        this.listener = request.listener;
        this.data = request.data;
        this.hijacker = request.hijacker;
        this.headers = (HttpUtil.Headers)request.headers.clone();
        this.client = request.getClient();
    }

    /**
     * Creates an empty builder for the given HTTP method and URL.
     *
     * @param method the HTTP method
     * @param url the target URL
     */
    public ESTRequestBuilder(String method, URL url)
    {
        this.method = method;
        this.url = url;
        this.headers = new HttpUtil.Headers();
    }

    /**
     * Sets the listener notified when the source connection is established.
     */
    public ESTRequestBuilder withConnectionListener(ESTSourceConnectionListener listener)
    {
        this.listener = listener;

        return this;
    }

    /**
     * Sets the hijacker that may take over the response processing.
     */
    public ESTRequestBuilder withHijacker(ESTHijacker hijacker)
    {
        this.hijacker = hijacker;

        return this;
    }

    /**
     * Replaces the target URL.
     */
    public ESTRequestBuilder withURL(URL url)
    {
        this.url = url;

        return this;
    }

    /**
     * Sets the request body; the array is defensively cloned.
     */
    public ESTRequestBuilder withData(byte[] data)
    {
        this.data = Arrays.clone(data);

        return this;
    }

    /**
     * Adds a header value, preserving any existing values for the key.
     */
    public ESTRequestBuilder addHeader(String key, String value)
    {
        headers.add(key, value);
        return this;
    }

    /**
     * Sets a header value, replacing any existing values for the key.
     */
    public ESTRequestBuilder setHeader(String key, String value)
    {
        headers.set(key, value);
        return this;
    }

    /**
     * Sets the client used to execute the request.
     */
    public ESTRequestBuilder withClient(ESTClient client)
    {
        this.client = client;

        return this;
    }

    /**
     * Builds the immutable {@link ESTRequest}.
     */
    public ESTRequest build()
    {
        return new ESTRequest(method, url, data, hijacker, listener, headers, client);
    }
}
| bcgit/bc-java | pkix/src/main/java/org/bouncycastle/est/ESTRequestBuilder.java |
1,445 | /*******************************************************************************
* Copyright 2011, 2012 Chris Banes.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package org.qii.weiciyuan.support.lib.pulltorefresh.internal;
import android.view.View;
/**
* Interface that allows PullToRefreshBase to hijack the call to
* AdapterView.setEmptyView()
*
* @author chris
*/
/**
 * Implemented by views that allow {@code PullToRefreshBase} to hijack the call
 * to {@code AdapterView.setEmptyView()}.
 *
 * @author chris
 */
public interface EmptyViewMethodAccessor {

    /**
     * Calls up to {@code AdapterView.setEmptyView()}.
     *
     * @param emptyView - to set as Empty View
     */
    void setEmptyViewInternal(View emptyView);

    /**
     * Should call {@code PullToRefreshBase.setEmptyView()}, which will then
     * automatically call through to {@link #setEmptyViewInternal(View)}.
     *
     * @param emptyView - to set as Empty View
     */
    void setEmptyView(View emptyView);
}
| qii/weiciyuan | src/org/qii/weiciyuan/support/lib/pulltorefresh/internal/EmptyViewMethodAccessor.java |
1,446 | /* Copyright (c) 2012 Tobias Wolf, All Rights Reserved
*
* The contents of this file is dual-licensed under 2
* alternative Open Source/Free licenses: LGPL 2.1 or later and
* Apache License 2.0. (starting with JNA version 4.0.0).
*
* You can freely decide which license you want to apply to
* the project.
*
* You may obtain a copy of the LGPL License at:
*
* http://www.gnu.org/licenses/licenses.html
*
* A copy is also included in the downloadable source code package
* containing JNA, in file "LGPL2.1".
*
* You may obtain a copy of the Apache License at:
*
* http://www.apache.org/licenses/
*
* A copy is also included in the downloadable source code package
* containing JNA, in file "AL2.0".
*/
package com.sun.jna.platform.win32;
import com.sun.jna.Memory;
import com.sun.jna.Native;
import com.sun.jna.Pointer;
import com.sun.jna.Structure;
import com.sun.jna.Structure.FieldOrder;
import com.sun.jna.platform.win32.Guid.GUID;
import com.sun.jna.platform.win32.OaIdl.DISPID;
import com.sun.jna.platform.win32.OaIdl.SAFEARRAY;
import com.sun.jna.platform.win32.OaIdl.SAFEARRAYBOUND;
import com.sun.jna.platform.win32.Variant.VARIANT;
import com.sun.jna.platform.win32.Variant.VariantArg;
import com.sun.jna.platform.win32.WTypes.BSTR;
import com.sun.jna.platform.win32.WTypes.VARTYPE;
import com.sun.jna.platform.win32.WTypes.VARTYPEByReference;
import com.sun.jna.platform.win32.WinBase.SYSTEMTIME;
import com.sun.jna.platform.win32.WinDef.LCID;
import com.sun.jna.platform.win32.WinDef.LONG;
import com.sun.jna.platform.win32.WinDef.PVOID;
import com.sun.jna.platform.win32.WinDef.UINT;
import com.sun.jna.platform.win32.WinNT.HRESULT;
import com.sun.jna.ptr.DoubleByReference;
import com.sun.jna.ptr.PointerByReference;
import com.sun.jna.win32.StdCallLibrary;
import com.sun.jna.win32.W32APIOptions;
/**
* Oleaut32.dll Interface.
*
* @author scott.palmer
*/
public interface OleAuto extends StdCallLibrary {
/**
* The instance.
*/
OleAuto INSTANCE = Native.load("OleAut32", OleAuto.class, W32APIOptions.DEFAULT_OPTIONS);
/* Flags for IDispatch::Invoke */
/**
* The Constant DISPATCH_METHOD.
*/
int DISPATCH_METHOD = 0x1;
/**
* The Constant DISPATCH_PROPERTYGET.
*/
int DISPATCH_PROPERTYGET = 0x2;
/**
* The Constant DISPATCH_PROPERTYPUT.
*/
int DISPATCH_PROPERTYPUT = 0x4;
/**
* The Constant DISPATCH_PROPERTYPUTREF.
*/
int DISPATCH_PROPERTYPUTREF = 0x8;
/**
 * An array that is allocated on the stack.
 */
int FADF_AUTO = 0x0001;
/**
* An array that is statically allocated.
*/
int FADF_STATIC = 0x0002;
/**
* An array that is embedded in a structure.
*/
int FADF_EMBEDDED = 0x0004;
/**
 * An array that may not be resized or reallocated.
 */
int FADF_FIXEDSIZE = 0x0010;
/**
 * An array that contains records.
 */
int FADF_RECORD = 0x0020;
/**
 * An array that has an IID identifying the interface.
 */
int FADF_HAVEIID = 0x0040;
/**
* An array that has a variant type. The variant type can be retrieved with
* SafeArrayGetVartype.
*/
int FADF_HAVEVARTYPE = 0x0080;
/**
* An array of BSTRs.
*/
int FADF_BSTR = 0x0100;
/**
* An array of IUnknown*.
*/
int FADF_UNKNOWN = 0x0200;
/**
* An array of IDispatch*.
*/
int FADF_DISPATCH = 0x0400;
/**
* An array of VARIANTs.
*/
int FADF_VARIANT = 0x0800;
/**
* Bits reserved for future use.
*/
int FADF_RESERVED = 0xF008;
/**
* This function allocates a new string and copies the passed string into
* it.
*
* @param sz Null-terminated UNICODE string to copy.
*
* @return Null if there is insufficient memory or if a null pointer is
* passed in.
*/
BSTR SysAllocString(String sz);
/**
* This function frees a string allocated previously by SysAllocString,
* SysAllocStringByteLen, SysReAllocString, SysAllocStringLen, or
* SysReAllocStringLen.
*
* @param bstr Unicode string that was allocated previously, or NULL.
* Setting this parameter to NULL causes the function to simply
* return.
*/
void SysFreeString(BSTR bstr);
/**
 * Returns the length (in bytes) of a BSTR.
 *
 * @param bstr Unicode string that was allocated previously.
 * @return The number of bytes in bstr, not including the terminating null
 *         character, or 0 if bstr is null.
 */
int SysStringByteLen(BSTR bstr);
/**
 * Returns the length of a BSTR.
 *
 * @param bstr Unicode string that was allocated previously.
 * @return The number of characters in bstr, not including the terminating
 *         null character, or 0 if bstr is null.
 */
int SysStringLen(BSTR bstr);
/**
* The VariantInit function initializes the VARIANTARG by setting the vt
* field to VT_EMPTY. Unlike VariantClear, this function does not interpret
* the current contents of the VARIANTARG. Use VariantInit to initialize new
* local variables of type VARIANTARG (or VARIANT).
*
* @param pvarg The variant to initialize.
*/
void VariantInit(VARIANT.ByReference pvarg);
/**
* The VariantInit function initializes the VARIANTARG by setting the vt
* field to VT_EMPTY. Unlike VariantClear, this function does not interpret
* the current contents of the VARIANTARG. Use VariantInit to initialize new
* local variables of type VARIANTARG (or VARIANT).
*
* @param pvarg The variant to initialize.
*/
void VariantInit(VARIANT pvarg);
/**
* First, free any memory that is owned by pvargDest, such as VariantClear
* (pvargDest must point to a valid initialized variant, and not simply to
* an uninitialized memory location). Then pvargDest receives an exact copy
* of the contents of pvargSrc.
* <p>
* If pvargSrc is a VT_BSTR, a copy of the string is made. If pvargSrcis a
* VT_ARRAY, the entire array is copied. If pvargSrc is a VT_DISPATCH or
* VT_UNKNOWN, AddRef is called to increment the object's reference count.
* <p>
* If the variant to be copied is a COM object that is passed by reference,
* the vtfield of the pvargSrcparameter is VT_DISPATCH | VT_BYREF or
* VT_UNKNOWN | VT_BYREF. In this case, VariantCopy does not increment the
* reference count on the referenced object. Because the variant being
* copied is a pointer to a reference to an object, VariantCopy has no way
* to determine if it is necessary to increment the reference count of the
* object. It is therefore the responsibility of the caller to call
* IUnknown::AddRef on the object or not, as appropriate.
* <p>
* Note The VariantCopy method is not threadsafe.
*
* @param pvargDest [out] The destination variant.
* @param pvargSrc [in] The source variant.
*
* @return the hresult
*/
HRESULT VariantCopy(Pointer pvargDest, VARIANT pvargSrc);
/**
* Use this function to clear variables of type VARIANTARG (or VARIANT)
* before the memory containing the VARIANTARG is freed (as when a local
* variable goes out of scope).
* <p>
* The function clears a VARIANTARG by setting the vt field to VT_EMPTY. The
* current contents of the VARIANTARG are released first. If the vtfield is
* VT_BSTR, the string is freed. If the vtfield is VT_DISPATCH, the object
* is released. If the vt field has the VT_ARRAY bit set, the array is
* freed.
* <p>
* If the variant to be cleared is a COM object that is passed by reference,
* the vtfield of the pvargparameter is VT_DISPATCH | VT_BYREF or VT_UNKNOWN
* | VT_BYREF. In this case, VariantClear does not release the object.
* Because the variant being cleared is a pointer to a reference to an
* object, VariantClear has no way to determine if it is necessary to
* release the object. It is therefore the responsibility of the caller to
* release the object or not, as appropriate.
* <p>
* In certain cases, it may be preferable to clear a variant in code without
* calling VariantClear. For example, you can change the type of a VT_I4
* variant to another type without calling this function. Safearrays of BSTR
* will have SysFreeString called on each element not VariantClear. However,
* you must call VariantClear if a VT_type is received but cannot be
* handled. Safearrays of variant will also have VariantClear called on each
* member. Using VariantClear in these cases ensures that code will continue
* to work if Automation adds new variant types in the future.
* <p>
 * Do not use VariantClear on uninitialized variants; use VariantInit to
* initialize a new VARIANTARG or VARIANT.
* <p>
* Variants containing arrays with outstanding references cannot be cleared.
* Attempts to do so will return an HRESULT containing DISP_E_ARRAYISLOCKED.
*
* @param pvarg [in, out] The variant to clear.
*
* @return the hresult
*/
HRESULT VariantClear(VARIANT pvarg);
public static final short VARIANT_NOVALUEPROP = 0x01;
/**
* For VT_BOOL to VT_BSTR conversions, convert to "True"/"False" instead of
* "-1"/"0"
*/
public static final short VARIANT_ALPHABOOL = 0x02;
/**
* For conversions to/from VT_BSTR, passes LOCALE_NOUSEROVERRIDE to core
* coercion routines
*/
public static final short VARIANT_NOUSEROVERRIDE = 0x04;
public static final short VARIANT_CALENDAR_HIJRI = 0x08;
/**
* For VT_BOOL to VT_BSTR and back, convert to local language rather than
* English
*/
public static final short VARIANT_LOCALBOOL = 0x10;
/**
* SOUTHASIA calendar support
*/
public static final short VARIANT_CALENDAR_THAI = 0x20;
/**
* SOUTHASIA calendar support
*/
public static final short VARIANT_CALENDAR_GREGORIAN = 0x40;
/**
* NLS function call support
*/
public static final short VARIANT_USE_NLS = 0x80;
/**
* Converts a variant from one type to another.
*
* @param pvargDest [out] The destination variant. If this is the same as
* pvarSrc, the variant will be converted in place.
* @param pvarSrc [in] The variant to convert.
* @param wFlags Combination of the following flags
* <table>
* <thead>
* <tr><th><!--indent under wFlags comment--><div style="visibility: hidden">wFlags</div></th><th>Value</th><th>Meaning</th></tr>
* </thead>
* <tbody valign="top">
* <tr><th></th><td>{@link #VARIANT_NOVALUEPROP}</td><td>Prevents the
* function from attempting to coerce an object to a fundamental type by
* getting the Value property. Applications should set this flag only if
* necessary, because it makes their behavior inconsistent with other
* applications.</td></tr>
* <tr><th></th><td>{@link #VARIANT_ALPHABOOL}</td><td>Converts a
* {@link Variant#VT_BOOL VT_BOOL} value to a string containing either
* "True" or "False".</td></tr>
* <tr><th></th><td>{@link #VARIANT_NOUSEROVERRIDE}</td><td>For conversions
* to or from {@link Variant#VT_BSTR VT_BSTR}, passes LOCALE_NOUSEROVERRIDE
* to the core coercion routines.</td></tr>
* <tr><th></th><td>{@link #VARIANT_LOCALBOOL}</td><td>For conversions from
* {@link Variant#VT_BOOL VT_BOOL} to {@link Variant#VT_BSTR VT_BSTR} and
* back, uses the language specified by the locale in use on the local
* computer.</td></tr>
* </tbody>
* </table>
* @param vt The type to convert to. If the return code is
* {@link WinError#S_OK S_OK}, the vt field of the vargDest
* is guaranteed to be equal to this value.
*
* @return This function can return one of these values:
* <table>
* <thead>
* <tr><th>Return code</th><th>Description</th></tr>
* </thead>
* <tbody valign="top">
* <tr><td>{@link WinError#S_OK S_OK}</td><td>Success.</td></tr>
* <tr><td>{@link WinError#DISP_E_BADVARTYPE DISP_E_BADVARTYPE}</td><td>The
* variant type is not a valid type of variant.</td></tr>
* <tr><td>{@link WinError#DISP_E_OVERFLOW DISP_E_OVERFLOW}</td><td>The data
* pointed to by pvarSrc does not fit in the destination type.</td></tr>
* <tr><td>{@link WinError#DISP_E_TYPEMISMATCH DISP_E_TYPEMISMATCH}</td><td>The
* argument could not be coerced to the specified type.</td></tr>
* <tr><td>{@link WinError#E_INVALIDARG E_INVALIDARG}</td><td>One of the
* arguments is not valid.</td></tr>
* <tr><td>{@link WinError#E_OUTOFMEMORY E_OUTOFMEMORY}</td><td>Insufficient
* memory to complete the operation.</td></tr>
* </tbody>
* </table>
* </p>
* <b>Remarks</b>
* </p>
* The VariantChangeType function handles coercions between the fundamental
* types (including numeric-to-string and string-to-numeric coercions). The
* pvarSrc argument is changed during the conversion process. For example,
* if the source variant is of type {@link Variant#VT_BOOL VT_BOOL} and the
* destination is of type {@link Variant#VT_UINT VT_UINT}, the pvarSrc
* argument is first converted to {@link Variant#VT_I2 VT_I2} and then the
* conversion proceeds. A variant that has {@link Variant#VT_BYREF VT_BYREF}
* set is coerced to a value by obtaining the referenced value. An object is
* coerced to a value by invoking the object's Value property
* ({@link OaIdl#DISPID_VALUE DISPID_VALUE}).
* </p>
* Typically, the implementor of
* {@link com.sun.jna.platform.win32.COM.IDispatch#Invoke IDispatch.Invoke}
* determines which member is being accessed, and then calls
* VariantChangeType to get the value of one or more arguments. For example,
* if the IDispatch call specifies a SetTitle member that takes one string
* argument, the implementor would call VariantChangeType to attempt to
* coerce the argument to {@link Variant#VT_BSTR VT_BSTR}. If
* VariantChangeType does not return an error, the argument could then be
* obtained directly from the
* {@link Variant.VARIANT._VARIANT.__VARIANT#bstrVal bstrVal} field of the
* {@link Variant.VARIANT VARIANT}. If VariantChangeType returns
* {@link WinError#DISP_E_TYPEMISMATCH DISP_E_TYPEMISMATCH}, the implementor
* would set {@link com.sun.jna.platform.win32.COM.IDispatch#Invoke Invoke}
* <code> puArgErr</code> parameter referenced value to 0 (indicating the
* argument in error) and return DISP_E_TYPEMISMATCH from Invoke.
* </p>
* Arrays of one type cannot be converted to arrays of another type with
* this function.
* </p>
* <b>Note</b> The type of a {@link Variant.VARIANT VARIANT} should not be
* changed in the {@link DISPPARAMS#rgvarg rgvarg} array in place.
*/
HRESULT VariantChangeType(VARIANT pvargDest, VARIANT pvarSrc, short wFlags, VARTYPE vt);
/**
* Converts a variant from one type to another.
* @param pvargDest [out] The destination variant. If this is the same as
* pvarSrc, the variant will be converted in place.
* @param pvarSrc [in] The variant to convert.
* @param wFlags Combination of the following flags
* <table>
* <thead>
* <tr><th><!--indent under wFlags comment--><div style="visibility: hidden">wFlags</div></th><th>Value</th><th>Meaning</th></tr>
* </thead>
* <tbody valign="top">
* <tr><th></th><td>{@link #VARIANT_NOVALUEPROP}</td><td>Prevents the function from attempting to coerce an object to a fundamental type by getting the Value property. Applications should set this flag only if necessary, because it makes their behavior inconsistent with other applications.</td></tr>
* <tr><th></th><td>{@link #VARIANT_ALPHABOOL}</td><td>Converts a {@link Variant#VT_BOOL VT_BOOL} value to a string containing either "True" or "False".</td></tr>
* <tr><th></th><td>{@link #VARIANT_NOUSEROVERRIDE}</td><td>For conversions to or from {@link Variant#VT_BSTR VT_BSTR}, passes LOCALE_NOUSEROVERRIDE to the core coercion routines.</td></tr>
* <tr><th></th><td>{@link #VARIANT_LOCALBOOL}</td><td>For conversions from {@link Variant#VT_BOOL VT_BOOL} to {@link Variant#VT_BSTR VT_BSTR} and back, uses the language specified by the locale in use on the local computer.</td></tr>
* </tbody>
* </table>
* @param vt The type to convert to. If the return code is {@link WinError#S_OK S_OK}, the vt
* field of the vargDest is guaranteed to be equal to this value.
* @return This function can return one of these values:
* <table>
* <thead>
* <tr><th>Return code</th><th>Description</th></tr>
* </thead>
* <tbody valign="top">
* <tr><td>{@link WinError#S_OK S_OK}</td><td>Success.</td></tr>
* <tr><td>{@link WinError#DISP_E_BADVARTYPE DISP_E_BADVARTYPE}</td><td>The variant type is not a valid type of variant.</td></tr>
* <tr><td>{@link WinError#DISP_E_OVERFLOW DISP_E_OVERFLOW}</td><td>The data pointed to by pvarSrc does not fit in the destination type.</td></tr>
* <tr><td>{@link WinError#DISP_E_TYPEMISMATCH DISP_E_TYPEMISMATCH}</td><td>The argument could not be coerced to the specified type.</td></tr>
* <tr><td>{@link WinError#E_INVALIDARG E_INVALIDARG}</td><td>One of the arguments is not valid.</td></tr>
* <tr><td>{@link WinError#E_OUTOFMEMORY E_OUTOFMEMORY}</td><td>Insufficient memory to complete the operation.</td></tr>
* </tbody>
* </table>
*</p>
* <b>Remarks</b>
*</p>
* The VariantChangeType function handles coercions between the fundamental
* types (including numeric-to-string and string-to-numeric coercions). The
* pvarSrc argument is changed during the conversion process. For example,
* if the source variant is of type {@link Variant#VT_BOOL VT_BOOL} and the
* destination is of type {@link Variant#VT_UINT VT_UINT}, the pvarSrc
* argument is first converted to {@link Variant#VT_I2 VT_I2} and then the
* conversion proceeds. A variant that has {@link Variant#VT_BYREF VT_BYREF}
* set is coerced to a value by obtaining the referenced value. An object is
* coerced to a value by invoking the object's Value property
* ({@link OaIdl#DISPID_VALUE DISPID_VALUE}).
*</p>
* Typically, the implementor of
* {@link com.sun.jna.platform.win32.COM.IDispatch#Invoke IDispatch.Invoke}
* determines which member is being accessed, and then calls
* VariantChangeType to get the value of one or more arguments. For example,
* if the IDispatch call specifies a SetTitle member that takes one string
* argument, the implementor would call VariantChangeType to attempt to
* coerce the argument to {@link Variant#VT_BSTR VT_BSTR}. If
* VariantChangeType does not return an error, the argument could then be
* obtained directly from the
* {@link Variant.VARIANT._VARIANT.__VARIANT#bstrVal bstrVal} field of the
* {@link Variant.VARIANT VARIANT}. If VariantChangeType returns
* {@link WinError#DISP_E_TYPEMISMATCH DISP_E_TYPEMISMATCH}, the implementor
* would set {@link com.sun.jna.platform.win32.COM.IDispatch#Invoke Invoke}
* <code> puArgErr</code> parameter referenced value to 0 (indicating the
* argument in error) and return DISP_E_TYPEMISMATCH from Invoke.
*</p>
* Arrays of one type cannot be converted to arrays of another type with
* this function.
*</p>
* <b>Note</b> The type of a {@link Variant.VARIANT VARIANT} should not be
* changed in the {@link DISPPARAMS#rgvarg rgvarg} array in place.
*/
HRESULT VariantChangeType(VARIANT.ByReference pvargDest, VARIANT.ByReference pvarSrc, short wFlags, VARTYPE vt);
/**
* Creates a new array descriptor, allocates and initializes the data for
* the array, and returns a pointer to the new array descriptor.
*
* @param vt [in] The base type of the array (the VARTYPE of each
* element of the array). The VARTYPE is restricted to a
* subset of the variant types. Neither the VT_ARRAY nor
* the VT_BYREF flag can be set. VT_EMPTY and VT_NULL are
* not valid base types for the array. All other types are
* legal. cDims
*
* @param cDims the number of dims
* @param rgsabound the rgsabound
*
* @return Return value
*
* A safe array descriptor, or null if the array could not be created.
*/
SAFEARRAY.ByReference SafeArrayCreate(VARTYPE vt, UINT cDims,
SAFEARRAYBOUND[] rgsabound);
/**
* Stores the data element at the specified location in the array.
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
* @param idx the idx
* @param pv [in] The data to assign to the array. The variant types
* VT_DISPATCH, VT_UNKNOWN, and VT_BSTR are pointers, and do not
* require another level of indirection.
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>DISP_E_BADINDEX</dt><dd>The specified index is not valid.</dd>
* <dt>E_INVALIDARG</dt><dd>One of the arguments is not valid.</dd>
* <dt>E_OUTOFMEMORY</dt><dd>Memory could not be allocated for the
* element.</dd>
* </dl>
*/
HRESULT SafeArrayPutElement(SAFEARRAY psa, LONG[] idx, Pointer pv);
/**
* Retrieve the upper bound for the specified dimension of the supplied
* array
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
* @param nDim [in] the dimension, one based
* @param bound [out] upper bound for the supplied dimension
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>DISP_E_BADINDEX</dt><dd>The specified index is not valid.</dd>
* <dt>E_INVALIDARG</dt><dd>One of the arguments is not valid.</dd>
* </dl>
*/
HRESULT SafeArrayGetUBound(SAFEARRAY psa, UINT nDim, WinDef.LONGByReference bound);
/**
* Retrieve the lower bound for the specified dimension of the supplied
* array
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
* @param nDim [in] the dimension, one based
* @param bound [out] lower bound for the supplied dimension
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>DISP_E_BADINDEX</dt><dd>The specified index is not valid.</dd>
* <dt>E_INVALIDARG</dt><dd>One of the arguments is not valid.</dd>
* </dl>
*/
HRESULT SafeArrayGetLBound(SAFEARRAY psa, UINT nDim, WinDef.LONGByReference bound);
/**
* Retrieves a single element of the array.
* <p>
 * The array is automatically locked via SafeArrayLock and SafeArrayUnlock.
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
* @param rgIndices [in] A vector of indexes for each dimension of the
* array. The right-most (least significant) dimension is
* rgIndices[0]. The left-most dimension is stored at
* rgIndices[psa->cDims - 1].
* @param pv [out] The element of the array.
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>DISP_E_BADINDEX</dt><dd>The specified index is not valid.</dd>
* <dt>E_INVALIDARG</dt><dd>One of the arguments is not valid.</dd>
* <dt>E_OUTOFMEMORY</dt><dd>Memory could not be allocated for the
* element.</dd>
* </dl>
*/
HRESULT SafeArrayGetElement(SAFEARRAY psa, LONG[] rgIndices, Pointer pv);
/**
* Retrieves the pointer to a single element of the array.
* <p>
* <p>
* The caller is responsible for locking.</p>
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
* @param rgIndices [in] A vector of indexes for each dimension of the
* array. The right-most (least significant) dimension is
* rgIndices[0]. The left-most dimension is stored at
* rgIndices[psa->cDims - 1].
* @param ppv [out] The element of the array.
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>DISP_E_BADINDEX</dt><dd>The specified index is not valid.</dd>
* <dt>E_INVALIDARG</dt><dd>One of the arguments is not valid.</dd>
* </dl>
*/
HRESULT SafeArrayPtrOfIndex(SAFEARRAY psa, LONG[] rgIndices, PointerByReference ppv);
/**
* Increments the lock count of an array, and places a pointer to the array
* data in pvData of the array descriptor.
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>E_INVALIDARG</dt><dd>The argument psa is not valid.</dd>
* <dt>E_UNEXPECTED</dt><dd>The array could not be locked.</dd>
* </dl>
*/
HRESULT SafeArrayLock(SAFEARRAY psa);
/**
* Decrements the lock count of an array so it can be freed or resized.
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>E_INVALIDARG</dt><dd>The argument psa is not valid.</dd>
* <dt>E_UNEXPECTED</dt><dd>The array could not be locked.</dd>
* </dl>
*/
HRESULT SafeArrayUnlock(SAFEARRAY psa);
/**
* Destroys an existing array descriptor and all of the data in the array.
* If objects are stored in the array, Release is called on each object in
* the array.
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>E_INVALIDARG</dt><dd>The argument psa is not valid.</dd>
* <dt>DISP_E_ARRAYISLOCKED</dt><dd>The array could not be locked.</dd>
* </dl>
*/
HRESULT SafeArrayDestroy(SAFEARRAY psa);
/**
* Changes the right-most (least significant) bound of the specified safe
* array.
*
* @param psa [in, out] An array descriptor created by
* SafeArrayCreate.
* @param psaboundNew [in] New bounds for the least significant dimension
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>E_INVALIDARG</dt><dd>The argument psa is not valid.</dd>
* <dt>DISP_E_ARRAYISLOCKED</dt><dd>The array could not be locked.</dd>
* </dl>
*/
HRESULT SafeArrayRedim(SAFEARRAY psa, SAFEARRAYBOUND psaboundNew);
/**
* Return VARTYPE of the SAFEARRAY
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
* @param pvt [in] Vartype of the SAFEARRAY
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>E_INVALIDARG</dt><dd>The argument psa is not valid.</dd>
* </dl>
*/
HRESULT SafeArrayGetVartype(SAFEARRAY psa, VARTYPEByReference pvt);
/**
* Return number of dimensions of the SAFEARRAY
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
*
* @return Return count of dimensions
*/
UINT SafeArrayGetDim(SAFEARRAY psa);
/**
* Lock array and retrieve pointer to data
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
* @param ppvData [in] pointer to the data array
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>E_INVALIDARG</dt><dd>The argument psa is not valid.</dd>
* <dt>E_UNEXPECTED</dt><dd>The array could not be locked.</dd>
* </dl>
*/
HRESULT SafeArrayAccessData(SAFEARRAY psa, PointerByReference ppvData);
/**
* Unlock array and invalidate the pointer retrieved via SafeArrayAccessData
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
*
* @return Return value
*
* This function can return one of these values.
*
* <dl>
* <dt>S_OK</dt><dd>Success.</dd>
* <dt>E_INVALIDARG</dt><dd>The argument psa is not valid.</dd>
* <dt>E_UNEXPECTED</dt><dd>The array could not be locked.</dd>
* </dl>
*/
HRESULT SafeArrayUnaccessData(SAFEARRAY psa);
/**
* Get size of one element in bytes
*
* @param psa [in] An array descriptor created by SafeArrayCreate.
*
* @return size in bytes
*/
UINT SafeArrayGetElemsize(SAFEARRAY psa);
/**
* Retrieves a pointer to a running object that has been registered with
* OLE.
*
* @param rclsid [in] The class identifier (CLSID) of the active object
* from the OLE registration database.
* @param pvReserved Reserved for future use. Must be null.
* @param ppunk [out] The requested active object.
*
* @return Return value
*
* If this function succeeds, it returns S_OK. Otherwise, it returns an
* HRESULT error code.
*/
HRESULT GetActiveObject(GUID rclsid, PVOID pvReserved, PointerByReference ppunk);
/**
 * The Class DISPPARAMS - the argument structure passed to
 * {@code IDispatch::Invoke}.
 */
@FieldOrder({"rgvarg", "rgdispidNamedArgs", "cArgs", "cNamedArgs"})
public class DISPPARAMS extends Structure {
    /**
     * The Class ByReference.
     */
    public static class ByReference extends DISPPARAMS implements
            Structure.ByReference {
    }
    /**
     * The rgvarg. - use setArgs to update arguments
     */
    public VariantArg.ByReference rgvarg;
    /**
     * The rgdispid named args. - use setRgdispidNamedArgs to update named
     * arguments; NULL when there are no named arguments.
     */
    public Pointer rgdispidNamedArgs = Pointer.NULL;
    /**
     * The c args. - use setArgs to update arguments
     */
    public UINT cArgs = new UINT(0);
    /**
     * The c named args. - use setRgdispidNamedArgs to update named
     * arguments map
     */
    public UINT cNamedArgs = new UINT(0);

    /**
     * Reads the named-argument DISPIDs back from native memory.
     *
     * @return the DISPIDs of the named arguments; an empty array (never
     *         null) when no named arguments are set.
     */
    public DISPID[] getRgdispidNamedArgs() {
        int count = cNamedArgs.intValue();
        if (rgdispidNamedArgs == null || count <= 0) {
            return new DISPID[0];
        }
        int[] rawData = rgdispidNamedArgs.getIntArray(0, count);
        DISPID[] namedArgs = new DISPID[count];
        for (int i = 0; i < count; i++) {
            namedArgs[i] = new DISPID(rawData[i]);
        }
        return namedArgs;
    }

    /**
     * Stores the given named-argument DISPIDs into freshly allocated native
     * memory and updates {@link #cNamedArgs} accordingly.
     *
     * @param namedArgs the DISPIDs to set; null is treated as empty.
     */
    public void setRgdispidNamedArgs(DISPID[] namedArgs) {
        if (namedArgs == null) {
            namedArgs = new DISPID[0];
        }
        cNamedArgs = new UINT(namedArgs.length);
        if (namedArgs.length == 0) {
            // new Memory(0) throws IllegalArgumentException in JNA; an
            // empty named-argument list is represented by a NULL pointer.
            rgdispidNamedArgs = Pointer.NULL;
            return;
        }
        // Widen to long before multiplying to avoid int overflow.
        Memory mem = new Memory((long) DISPID.SIZE * namedArgs.length);
        int[] rawData = new int[namedArgs.length];
        for (int i = 0; i < rawData.length; i++) {
            rawData[i] = namedArgs[i].intValue();
        }
        mem.write(0, rawData, 0, namedArgs.length);
        rgdispidNamedArgs = mem;
    }

    /**
     * Returns the positional arguments backed by {@link #rgvarg}.
     *
     * @return the argument VARIANTs; an empty array (never null) when no
     *         arguments are set.
     */
    public VARIANT[] getArgs() {
        if (this.rgvarg != null) {
            this.rgvarg.setArraySize(cArgs.intValue());
            return this.rgvarg.variantArg;
        } else {
            return new VARIANT[0];
        }
    }

    /**
     * Stores the given arguments and updates {@link #cArgs} accordingly.
     *
     * @param arguments the VARIANTs to pass; null is treated as empty.
     */
    public void setArgs(VARIANT[] arguments) {
        if (arguments == null) {
            arguments = new VARIANT[0];
        }
        rgvarg = new VariantArg.ByReference(arguments);
        cArgs = new UINT(arguments.length);
    }

    /**
     * Instantiates a new dispparams.
     */
    public DISPPARAMS() {
        super();
    }

    /**
     * Instantiates a new dispparams.
     *
     * @param memory the memory
     */
    public DISPPARAMS(Pointer memory) {
        super(memory);
        this.read();
    }
}
/**
* Uses registry information to load a type library.
*
* @param rguid The GUID of the library.
* @param wVerMajor The major version of the library.
* @param wVerMinor The minor version of the library.
* @param lcid The national language code of the library.
* @param pptlib The loaded type library.
*
* This function can return one of these values: S_OK Success.
*
* E_INVALIDARG One or more of the arguments is not valid.
*
* E_OUTOFMEMORY Insufficient memory to complete the operation.
*
* TYPE_E_IOERROR The function could not write to the file.
*
* TYPE_E_INVALIDSTATE The type library could not be opened.
*
* TYPE_E_INVDATAREAD The function could not read from the file.
*
* TYPE_E_UNSUPFORMAT The type library has an older format.
*
* TYPE_E_UNKNOWNLCID The LCID could not be found in the OLE-supported DLLs.
*
* TYPE_E_CANTLOADLIBRARY The type library or DLL could not be loaded.
*
* @return status
*/
HRESULT LoadRegTypeLib(GUID rguid, int wVerMajor, int wVerMinor, LCID lcid, PointerByReference pptlib);
/**
* Loads and registers a type library.
*
* @param szFile The name of the file from which the method should attempt
* to load a type library.
*
* @param pptlib The loaded type library. Return value
*
* This function can return one of these values.
*
* S_OK Success.
*
* E_INVALIDARG One or more of the arguments is not valid.
*
* E_OUTOFMEMORY Insufficient memory to complete the operation.
*
* TYPE_E_IOERROR The function could not write to the file.
*
* TYPE_E_INVALIDSTATE The type library could not be opened.
*
* TYPE_E_INVDATAREAD The function could not read from the file.
*
* TYPE_E_UNSUPFORMAT The type library has an older format.
*
* TYPE_E_UNKNOWNLCID The LCID could not be found in the OLE-supported DLLs.
*
* TYPE_E_CANTLOADLIBRARY The type library or DLL could not be loaded.
*
* @return status
*/
HRESULT LoadTypeLib(String szFile, PointerByReference pptlib);
/**
* Converts a system time to a variant representation.
*
* @param lpSystemTime [in] The system time.
*
* @param pvtime [out] The variant time.
*
* @return The function returns TRUE on success and FALSE otherwise.
*/
int SystemTimeToVariantTime(SYSTEMTIME lpSystemTime, DoubleByReference pvtime);
/**
* Converts a variant representation of time to a system time.
*
* @param vtime [in] The variant time.
*
* @param lpSystemTime [out] The system time.
*
* @return The function returns TRUE on success and FALSE otherwise.
*/
int VariantTimeToSystemTime(double vtime, SYSTEMTIME lpSystemTime);
}
| java-native-access/jna | contrib/platform/src/com/sun/jna/platform/win32/OleAuto.java |
1,447 | /*
* Created on May 25, 2004
*
* Paros and its related class files.
*
* Paros is an HTTP/HTTPS proxy for assessing web application security.
* Copyright (C) 2003-2004 Chinotec Technologies Company
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the Clarified Artistic License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Clarified Artistic License for more details.
*
* You should have received a copy of the Clarified Artistic License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
// ZAP: 2011/05/09 Support for API
// ZAP: 2011/05/15 Support for exclusions
// ZAP: 2012/03/15 Removed unnecessary castings from methods notifyListenerRequestSend,
// notifyListenerResponseReceive and isProcessCache. Set the name of the proxy thread.
// Replaced the class HttpBody with the new class HttpRequestBody and replaced the method
// call from readBody to readRequestBody of the class HttpInputStream.
// ZAP: 2012/04/25 Added @Override annotation to the appropriate method.
// ZAP: 2012/05/11 Do not close connections in final clause of run() method,
// if boolean attribute keepSocketOpen is set to true.
// ZAP: 2012/08/07 Issue 342 Support the HttpSenderListener
// ZAP: 2012/11/04 Issue 408: Add support to encoding transformations, added an
// option to control whether the "Accept-Encoding" request-header field is
// modified/removed or not.
// ZAP: 2012/12/27 Added support for PersistentConnectionListener.
// ZAP: 2013/01/04 Do beginSSL() on HTTP CONNECT only if port requires so.
// ZAP: 2013/03/03 Issue 547: Deprecate unused classes and methods
// ZAP: 2013/04/11 Issue 621: Handle requests to the proxy URL
// ZAP: 2013/04/14 Issue 622: Local proxy unable to correctly detect requests to itself
// ZAP: 2013/06/17 Issue 686: Log HttpException (as error) in the ProxyThread
// ZAP: 2013/12/13 Issue 939: ZAP should accept SSL connections on non-standard ports automatically
// ZAP: 2014/03/06 Issue 1063: Add option to decode all gzipped content
// ZAP: 2014/03/23 Tidy up, extracted a method that writes an HTTP response and moved the
// code responsible to decode a GZIP response to a method
// ZAP: 2014/03/23 Fixed an issue with ProxyThread that happened when the proxy was set to listen on
// any address in which case the requests to the proxy itself were not correctly detected.
// ZAP: 2014/03/23 Issue 122: ProxyThread logging timeout readings with incorrect message (URL)
// ZAP: 2014/03/23 Issue 585: Proxy - "502 Bad Gateway" errors responded as "504 Gateway Timeout"
// ZAP: 2014/03/23 Issue 969: Proxy - Do not include the response body when answering unsuccessful
// HEAD requests
// ZAP: 2014/03/23 Issue 1017: Proxy set to 0.0.0.0 causes incorrect PAC file to be generated
// ZAP: 2014/03/23 Issue 1022: Proxy - Allow to override a proxied message
// ZAP: 2014/04/17 Issue 1156: Proxy gzip decoder doesn't update content length in response headers
// ZAP: 2014/05/01 Issue 1156: Proxy gzip decoder removes newlines in decoded response
// ZAP: 2014/05/01 Issue 1168: Add support for deflate encoded responses
// ZAP: 2015/01/04 Issue 1334: ZAP does not handle API requests on reused connections
// ZAP: 2015/02/24 Issue 1540: Allow proxy scripts to fake responses
// ZAP: 2015/07/17 Show stack trace of the exceptions on proxy errors
// ZAP: 2016/03/18 Issue 2318: ZAP Error [java.net.SocketTimeoutException]: Read timed out when
// running on AWS EC2 instance
// ZAP: 2016/04/13 Notify of timeouts when reading a response
// ZAP: 2016/04/14 Delay the write of response to not attempt to write a response again when
// handling IOException
// ZAP: 2016/04/29 Adjust exception logging levels and log when timeouts happen
// ZAP: 2016/05/30 Issue 2494: ZAP Proxy is not showing the HTTP CONNECT Request in history tab
// ZAP: 2016/06/13 Remove all unsupported encodings (instead of just some)
// ZAP: 2016/09/22 JavaDoc tweaks
// ZAP: 2016/11/28 Correct proxy errors' Content-Length value.
// ZAP: 2016/12/07 Allow to extend the ProxyThread and use a custom HttpSender
// ZAP: 2016/12/23 Make SocketTimeoutException less verbose for general use
// ZAP: 2017/02/08 Differentiate client read timeout after CONNECT, from server read timeout.
// ZAP: 2017/02/08 Change CONNECT response to contain just the status line, helps Android emulator
// consume the response.
// ZAP: 2017/02/20 Issue 2699: Make SSLException handling more user friendly
// ZAP: 2017/02/23 Issue 3227: Limit API access to permitted IP addresses
// ZAP: 2017/03/15 Disable API by default
// ZAP: 2017/03/26 Check the public address when behind NAT.
// ZAP: 2017/06/12 Do not notify listeners when request is excluded.
// ZAP: 2017/09/22 Check if first message received is a SSL/TLS handshake and tweak exception
// message.
// ZAP: 2017/10/02 Improve error handling when checking if SSL/TLS handshake.
// ZAP: 2018/01/29 Fix API issues with pconn connections
// ZAP: 2019/04/08 Issue 5304: Check for UnknownHostException and include appropriate message if
// proxy chain might be the cause.
// ZAP: 2019/06/01 Normalise line endings.
// ZAP: 2019/06/05 Normalise format/style.
// ZAP: 2020/11/26 Use Log4j 2 classes for logging.
// ZAP: 2020/12/09 Rely on the content encodings from the body to decode.
// ZAP: 2022/02/09 Deprecate the class.
// ZAP: 2022/05/20 Address deprecation warnings with ConnectionParam.
// ZAP: 2022/06/05 Address deprecation warnings with HttpException.
// ZAP: 2022/06/07 Address deprecation warnings with ZapGetMethod.
// ZAP: 2022/09/21 Use format specifiers instead of concatenation when logging.
// ZAP: 2022/09/26 Remove usage of org.ice4j classes.
package org.parosproxy.paros.core.proxy;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Vector;
import javax.net.ssl.SSLException;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.parosproxy.paros.Constant;
import org.parosproxy.paros.db.RecordHistory;
import org.parosproxy.paros.model.Model;
import org.parosproxy.paros.network.HttpBody;
import org.parosproxy.paros.network.HttpHeader;
import org.parosproxy.paros.network.HttpInputStream;
import org.parosproxy.paros.network.HttpMalformedHeaderException;
import org.parosproxy.paros.network.HttpMessage;
import org.parosproxy.paros.network.HttpOutputStream;
import org.parosproxy.paros.network.HttpRequestHeader;
import org.parosproxy.paros.network.HttpResponseHeader;
import org.parosproxy.paros.network.HttpSender;
import org.zaproxy.zap.PersistentConnectionListener;
import org.zaproxy.zap.extension.api.API;
import org.zaproxy.zap.network.HttpRequestBody;
import org.zaproxy.zap.network.HttpRequestConfig;
/**
* @deprecated No longer used/needed. It will be removed in a future release.
*/
@Deprecated
public class ProxyThread implements Runnable {
// private static final int BUFFEREDSTREAM_SIZE = 4096;

/** Response sent to the client to acknowledge an HTTP CONNECT request (status line only). */
private static final String CONNECT_HTTP_200 = "HTTP/1.1 200 Connection established\r\n\r\n";

// private static ArrayList processForwardList = new ArrayList();

private static Logger log = LogManager.getLogger(ProxyThread.class);

// Status lines used when answering the client with a proxy-generated error response.
private static final String BAD_GATEWAY_RESPONSE_STATUS = "502 Bad Gateway";
private static final String GATEWAY_TIMEOUT_RESPONSE_STATUS = "504 Gateway Timeout";

/** A {@code HttpRequestConfig} that does not allow notification of events to listeners. */
private static final HttpRequestConfig EXCLUDED_REQ_CONFIG =
        HttpRequestConfig.builder().setNotifyListeners(false).build();

// change httpSender to static to be shared among proxies to reuse keep-alive connections
/** The server that accepted the connection serviced by this thread. */
protected ProxyServer parentServer = null;
protected ProxyParam proxyParam = null;
protected org.parosproxy.paros.network.ConnectionParam connectionParam = null;
/** Thread running this {@code Runnable}; created in the constructor, started by {@link #start()}. */
protected Thread thread = null;
/** Socket connected to the client; may be re-wrapped when upgrading to SSL/TLS. */
protected Socket inSocket = null;
protected Socket outSocket = null;
protected HttpInputStream httpIn = null;
protected HttpOutputStream httpOut = null;
protected ProxyThread originProcess = this;
/** Sender used to forward messages to the server; lazily created by {@link #getHttpSender()}. */
private HttpSender httpSender = null;
/** Lock held while processing a message: this instance, or the shared singleton when serializing. */
private Object semaphore = this;
// ZAP: New attribute to allow for skipping disconnect
private boolean keepSocketOpen = false;
private static Object semaphoreSingleton = new Object();
// Monotonically increasing id used to name proxy threads.
private static int id = 1;
// All live proxy threads; consulted by isAnyProxyThreadRunning().
private static Vector<Thread> proxyThreadList = new Vector<>();
/** Constructs a {@code ProxyThread} that uses a default {@code HttpSender} (created lazily). */
protected ProxyThread(ProxyServer server, Socket socket) {
    this(server, socket, null);
}
/**
 * Constructs a {@code ProxyThread} with the given proxy server, socket and HTTP sender.
 *
 * <p>Configures the socket (TCP_NODELAY plus the configured read timeout) and creates the
 * daemon thread — not yet started — that will service the connection.
 *
 * @param server the parent proxy server.
 * @param socket the connected socket to read/write the messages.
 * @param httpSender the object used to send the messages, might be {@code null} in which case a
 *     default is used.
 * @since 2.6.0
 */
protected ProxyThread(ProxyServer server, Socket socket, HttpSender httpSender) {
    parentServer = server;
    proxyParam = parentServer.getProxyParam();
    connectionParam = parentServer.getConnectionParam();
    this.httpSender = httpSender;
    inSocket = socket;
    try {
        inSocket.setTcpNoDelay(true);
        // ZAP: Set timeout
        inSocket.setSoTimeout(connectionParam.getTimeoutInSecs() * 1000);
    } catch (SocketException e) {
        // ZAP: Log exceptions (socket configuration failure is non-fatal)
        log.warn(e.getMessage(), e);
    }
    thread = new Thread(this, "ZAP-ProxyThread-" + id++); // ZAP: Set the name of the thread.
    thread.setDaemon(true);
    thread.setPriority(Thread.NORM_PRIORITY - 1);
}
/** Starts the background thread that services this connection. */
public void start() {
    thread.start();
}
/**
 * Upgrades the client connection to SSL/TLS and re-creates the HTTP streams over it.
 *
 * @param targethost the host where you want to connect to, {@code null} when relying on SNI
 * @throws IOException if an error occurred while establishing the SSL/TLS connection
 */
@SuppressWarnings("deprecation")
private void beginSSL(String targethost) throws IOException {
    try {
        inSocket = HttpSender.getSSLConnector().createTunnelServerSocket(targethost, inSocket);
    } catch (org.parosproxy.paros.security.MissingRootCertificateException e) {
        // Rethrow as the same type; callers handle this case specifically.
        throw new org.parosproxy.paros.security.MissingRootCertificateException(e);
    } catch (Exception e) {
        String detail =
                targethost == null
                        ? "an unknown target domain (relying on SNI extension), cause: "
                                + e.getMessage()
                        : "'" + targethost + "'!";
        throw new IOException("Error while establishing SSL connection for " + detail, e);
    }
    httpIn = new HttpInputStream(inSocket);
    httpOut = new HttpOutputStream(inSocket.getOutputStream());
}
/**
 * Peeks at the first three bytes of the stream to decide whether the client is starting an
 * SSL/TLS handshake. The stream position is restored via mark/reset.
 *
 * @throws IOException if the stream is already at EOF.
 */
private static boolean isSslTlsHandshake(BufferedInputStream inputStream) throws IOException {
    byte[] header = new byte[3];
    inputStream.mark(3);
    int count = inputStream.read(header);
    inputStream.reset();
    if (count == -1) {
        throw new IOException(
                "Failed to check if SSL/TLS handshake, reached end of the stream.");
    }
    if (count < 3) {
        log.debug(
                "Failed to check if SSL/TLS handshake, got just {} bytes: {}",
                count,
                Arrays.toString(header));
        return false;
    }
    // Record ContentType 22 (0x16) marks a handshake record.
    boolean handshakeContentType = header[0] == 0x16;
    // "Valid" ProtocolVersion >= SSLv3 (TLSv1, TLSv1.1, ...)...
    boolean sslV3OrLater = header[1] >= 0x03;
    // ...or SSLv2.
    boolean sslV2 = header[1] == 0x00 && header[2] == 0x02;
    return handshakeContentType && (sslV3OrLater || sslV2);
}
/**
 * Services the client connection: detects an immediate TLS handshake, answers HTTP CONNECT
 * (upgrading to SSL/TLS when the tunnelled traffic is a handshake), then hands the first
 * request to {@link #processHttp(HttpRequestHeader, boolean)}. Cleans up in {@code finally}
 * unless a listener asked to keep the socket open.
 */
@Override
@SuppressWarnings("deprecation")
public void run() {
    proxyThreadList.add(thread);
    boolean isSecure = false;
    HttpRequestHeader firstHeader = null;
    try {
        BufferedInputStream bufferedInputStream =
                new BufferedInputStream(inSocket.getInputStream(), 2048);
        // Wrap the socket so the buffered (peekable) stream is used from here on.
        inSocket =
                new CustomStreamsSocket(
                        inSocket, bufferedInputStream, inSocket.getOutputStream());
        // Client may start TLS immediately (transparent/implicit SSL).
        if (isSslTlsHandshake(bufferedInputStream)) {
            isSecure = true;
            beginSSL(null);
        }
        httpIn = new HttpInputStream(inSocket);
        httpOut = new HttpOutputStream(inSocket.getOutputStream());
        firstHeader = httpIn.readRequestHeader(isSecure);
        firstHeader.setSenderAddress(inSocket.getInetAddress());
        if (firstHeader.getMethod().equalsIgnoreCase(HttpRequestHeader.CONNECT)) {
            // Acknowledge the tunnel request, then inspect what the client sends next.
            HttpMessage connectMsg = new HttpMessage(firstHeader);
            connectMsg.setTimeSentMillis(System.currentTimeMillis());
            try {
                httpOut.write(CONNECT_HTTP_200);
                httpOut.flush();
                connectMsg.setResponseHeader(CONNECT_HTTP_200);
                connectMsg.setTimeElapsedMillis(
                        (int) (System.currentTimeMillis() - connectMsg.getTimeSentMillis()));
                notifyConnectMessage(connectMsg);
                // Only upgrade to SSL/TLS if the tunnelled traffic actually is a handshake.
                if (isSslTlsHandshake(bufferedInputStream)) {
                    isSecure = true;
                    beginSSL(firstHeader.getHostName());
                }
                firstHeader = httpIn.readRequestHeader(isSecure);
                firstHeader.setSenderAddress(inSocket.getInetAddress());
                processHttp(firstHeader, isSecure);
            } catch (org.parosproxy.paros.security.MissingRootCertificateException e) {
                // Unluckily Firefox and Internet Explorer will not show this message.
                // We should find a way to let the browsers display this error message.
                // May we can redirect to some kind of ZAP custom error page.
                final HttpMessage errmsg = new HttpMessage(firstHeader);
                setErrorResponse(errmsg, BAD_GATEWAY_RESPONSE_STATUS, e, "ZAP SSL Error");
                writeHttpResponse(errmsg, httpOut);
                throw new IOException(e);
            }
        } else {
            processHttp(firstHeader, isSecure);
        }
    } catch (SocketTimeoutException e) {
        // ZAP: Log the exception; message depends on whether the timeout was after CONNECT.
        if (firstHeader != null) {
            if (HttpRequestHeader.CONNECT.equalsIgnoreCase(firstHeader.getMethod())) {
                log.warn(
                        "Timeout reading (client) message after CONNECT to {}",
                        firstHeader.getURI());
            } else {
                log.warn("Timeout accessing {}", firstHeader.getURI());
            }
        } else {
            log.warn("Socket timeout while reading first message.");
            log.debug(e, e);
        }
    } catch (HttpMalformedHeaderException e) {
        log.warn("Malformed Header: ", e);
    } catch (org.apache.commons.httpclient.HttpException e) {
        log.error(e.getMessage(), e);
    } catch (IOException e) {
        log.debug("IOException: ", e);
    } finally {
        proxyThreadList.remove(thread);
        // ZAP: do only close if flag is false (a listener may have taken over the connection)
        if (!keepSocketOpen) {
            disconnect();
        }
    }
}
/**
 * Notifies the {@code ConnectRequestProxyListener}s that a HTTP CONNECT request was received
 * from a client. A failing listener is logged and does not prevent the remaining ones from
 * being notified.
 *
 * @param connectMessage the HTTP CONNECT request received from a client
 */
private void notifyConnectMessage(HttpMessage connectMessage) {
    java.util.Iterator<ConnectRequestProxyListener> it =
            parentServer.getConnectRequestProxyListeners().iterator();
    while (it.hasNext()) {
        ConnectRequestProxyListener listener = it.next();
        try {
            listener.receivedConnectRequest(connectMessage);
        } catch (Exception e) {
            log.error("An error occurred while notifying listener:", e);
        }
    }
}
/** Sets an error response on {@code msg} using the generic "ZAP Error" type. */
private void setErrorResponse(HttpMessage msg, String responseStatus, Exception cause)
        throws HttpMalformedHeaderException {
    setErrorResponse(msg, responseStatus, cause, "ZAP Error");
}
/**
 * Builds a plain-text error body for {@code msg} describing {@code cause} and installs it as
 * the response.
 *
 * <p>{@code SSLException}s get a dedicated, localised message (with root cause and, when debug
 * logging is enabled, a stack trace); any other exception gets a generic
 * "errorType [class]: message" body. When the cause is an {@link UnknownHostException} whose
 * message matches the configured outgoing proxy's name, a hint about the proxy chain
 * configuration is appended.
 *
 * @param msg the message whose response is replaced.
 * @param responseStatus the status line reason, e.g. "502 Bad Gateway".
 * @param cause the exception being reported to the client.
 * @param errorType short label identifying the kind of error.
 * @throws HttpMalformedHeaderException if the response header could not be set.
 */
private void setErrorResponse(
        HttpMessage msg, String responseStatus, Exception cause, String errorType)
        throws HttpMalformedHeaderException {
    StringBuilder strBuilder = new StringBuilder();
    if (cause instanceof SSLException) {
        strBuilder.append(Constant.messages.getString("network.ssl.error.connect"));
        strBuilder.append(msg.getRequestHeader().getURI().toString()).append('\n');
        strBuilder
                .append(Constant.messages.getString("network.ssl.error.exception"))
                .append(cause.getMessage())
                .append('\n');
        strBuilder
                .append(Constant.messages.getString("network.ssl.error.exception.rootcause"))
                .append(ExceptionUtils.getRootCauseMessage(cause))
                .append('\n');
        strBuilder.append(
                Constant.messages.getString(
                        "network.ssl.error.help",
                        Constant.messages.getString("network.ssl.error.help.url")));
        // The SSL failure is also logged, at warn without and at debug with the stack trace.
        log.warn(strBuilder.toString());
        if (log.isDebugEnabled()) {
            log.debug(cause, cause);
            strBuilder.append("\n\nStack Trace:\n");
            for (String stackTraceFrame : ExceptionUtils.getRootCauseStackTrace(cause)) {
                strBuilder.append(stackTraceFrame).append('\n');
            }
        }
    } else {
        strBuilder
                .append(errorType)
                .append(" [")
                .append(cause.getClass().getName())
                .append("]: ")
                .append(cause.getLocalizedMessage())
                .append("\n");
        if (cause instanceof UnknownHostException
                && connectionParam.isUseProxyChain()
                && connectionParam.getProxyChainName().equalsIgnoreCase(cause.getMessage())) {
            strBuilder.append(
                    Constant.messages.getString("conn.options.proxy.error.response.msg"));
        }
        strBuilder.append("\n\nStack Trace:\n");
        for (String stackTraceFrame : ExceptionUtils.getRootCauseStackTrace(cause)) {
            strBuilder.append(stackTraceFrame).append('\n');
        }
    }
    setErrorResponse(msg, responseStatus, strBuilder.toString());
}
/**
 * Installs a plain-text (UTF-8) error response on {@code msg} with a correct Content-Length.
 * The body is omitted for HEAD requests, per HTTP semantics.
 */
private static void setErrorResponse(HttpMessage msg, String responseStatus, String message)
        throws HttpMalformedHeaderException {
    byte[] payload = message.getBytes(StandardCharsets.UTF_8);
    HttpResponseHeader header = new HttpResponseHeader("HTTP/1.1 " + responseStatus);
    header.setHeader(HttpHeader.CONTENT_TYPE, "text/plain; charset=UTF-8");
    header.setHeader(HttpHeader.CONTENT_LENGTH, String.valueOf(payload.length));
    msg.setResponseHeader(header);
    boolean headRequest = HttpRequestHeader.HEAD.equals(msg.getRequestHeader().getMethod());
    if (!headRequest) {
        msg.setResponseBody(message);
    }
}
/**
 * Writes the response of {@code msg} (header, then body when non-empty) to the given stream,
 * flushing after each part.
 */
private static void writeHttpResponse(HttpMessage msg, HttpOutputStream outputStream)
        throws IOException {
    outputStream.write(msg.getResponseHeader());
    outputStream.flush();
    if (msg.getResponseBody().length() > 0) {
        outputStream.write(msg.getResponseBody().getBytes());
        outputStream.flush();
    }
}
/**
 * Main request/response loop for one client connection.
 *
 * <p>For each request: reads header (and body when Content-Length > 0), lets the API handle it
 * when enabled, tries the cache, notifies listeners (unless the URL is excluded), forwards the
 * request via the {@code HttpSender}, and writes the response back to the client. Loops while
 * the connection is keep-alive and the socket is open; stops early when a persistent-connection
 * listener takes over the socket.
 *
 * @param requestHeader the already-read header of the first request.
 * @param isSecure whether the connection is tunnelled over SSL/TLS.
 * @throws IOException if reading/forwarding fails in a way not converted to an error response.
 */
protected void processHttp(HttpRequestHeader requestHeader, boolean isSecure)
        throws IOException {
    HttpRequestBody reqBody =
            null; // ZAP: Replaced the class HttpBody with the class HttpRequestBody.
    boolean isFirstRequest = true;
    HttpMessage msg = null;
    // reduce socket timeout after first read
    inSocket.setSoTimeout(2500);
    do {
        if (isFirstRequest) {
            isFirstRequest = false;
        } else {
            // Subsequent keep-alive requests: a read timeout simply ends the loop.
            try {
                requestHeader = httpIn.readRequestHeader(isSecure);
                requestHeader.setSenderAddress(inSocket.getInetAddress());
            } catch (SocketTimeoutException e) {
                // ZAP: Log the exception
                log.debug("Timed out while reading a new HTTP request.");
                return;
            }
        }
        if (parentServer.isEnableApi()) {
            msg =
                    API.getInstance()
                            .handleApiRequest(
                                    requestHeader, httpIn, httpOut, isRecursive(requestHeader));
            if (msg != null) {
                // Non-null means the API handled the request.
                if (msg.getRequestHeader().isEmpty()) {
                    return;
                }
                // Give persistent-connection listeners a chance to take over (e.g. websockets).
                org.zaproxy.zap.ZapGetMethod method = new org.zaproxy.zap.ZapGetMethod();
                method.setUpgradedSocket(inSocket);
                method.setUpgradedInputStream(httpIn);
                keepSocketOpen = notifyPersistentConnectionListener(msg, inSocket, method);
                return;
            }
        }
        msg = new HttpMessage();
        msg.setRequestHeader(requestHeader);
        if (msg.getRequestHeader().getContentLength() > 0) {
            reqBody =
                    httpIn.readRequestBody(
                            requestHeader); // ZAP: Changed to call the method readRequestBody.
            msg.setRequestBody(reqBody);
        }
        if (proxyParam.isRemoveUnsupportedEncodings()) {
            removeUnsupportedEncodings(msg);
        }
        // Cache hit: response already written, go read the next request.
        if (isProcessCache(msg)) {
            continue;
        }
        // System.out.println("send required: " +
        // msg.getRequestHeader().getURI().toString());
        // Serialize processing across all proxy threads when configured to do so.
        if (parentServer.isSerialize()) {
            semaphore = semaphoreSingleton;
        } else {
            semaphore = this;
        }
        boolean send = true;
        boolean excluded = parentServer.excludeUrl(msg.getRequestHeader().getURI());
        synchronized (semaphore) {
            if (!excluded) {
                if (notifyOverrideListenersRequestSend(msg)) {
                    // A listener supplied the response itself; do not forward.
                    send = false;
                } else if (!notifyListenerRequestSend(msg)) {
                    // One of the listeners has told us to drop the request
                    return;
                }
            }
            try {
                // bug occur where response cannot be processed by various listener
                // first so streaming feature was disabled
                // getHttpSender().sendAndReceive(msg, httpOut, buffer);
                if (excluded) {
                    // Excluded URLs are forwarded without notifying listeners.
                    getHttpSender().sendAndReceive(msg, EXCLUDED_REQ_CONFIG);
                } else if (send) {
                    if (msg.getResponseHeader().isEmpty()) {
                        // Normally the response is empty.
                        // The only reason it won't be is if a script or other ext has
                        // deliberately 'hijacked' this request
                        // We don't just set send=false as this then means it won't appear in
                        // the History tab
                        getHttpSender().sendAndReceive(msg);
                    }
                    if (proxyParam.isAlwaysDecodeGzip()) {
                        decodeResponseIfNeeded(msg);
                    }
                    if (!notifyOverrideListenersResponseReceived(msg)) {
                        if (!notifyListenerResponseReceive(msg)) {
                            // One of the listeners has told us to drop the response
                            return;
                        }
                    }
                }
                // notifyWrittenToForwardProxy();
            } catch (org.apache.commons.httpclient.HttpException e) {
                // System.out.println("HttpException");
                throw e;
            } catch (SocketTimeoutException e) {
                // Server read timeout: answer 504 and still notify response listeners.
                String message =
                        Constant.messages.getString(
                                "proxy.error.readtimeout",
                                msg.getRequestHeader().getURI(),
                                connectionParam.getTimeoutInSecs());
                log.warn(message);
                setErrorResponse(msg, GATEWAY_TIMEOUT_RESPONSE_STATUS, message);
                if (!excluded) {
                    notifyListenerResponseReceive(msg);
                }
            } catch (IOException e) {
                // Other I/O failure while forwarding: answer 502.
                setErrorResponse(msg, BAD_GATEWAY_RESPONSE_STATUS, e);
                if (!excluded) {
                    notifyListenerResponseReceive(msg);
                }
                // throw e;
            }
            // Write (possibly error) response back; delayed so an IOException above never
            // results in a second write attempt.
            try {
                writeHttpResponse(msg, httpOut);
            } catch (IOException e) {
                StringBuilder strBuilder = new StringBuilder(200);
                strBuilder.append("Failed to write/forward the HTTP response to the client: ");
                strBuilder.append(e.getClass().getName());
                if (e.getMessage() != null) {
                    strBuilder.append(": ").append(e.getMessage());
                }
                log.warn(strBuilder.toString());
            }
        } // release semaphore
        org.zaproxy.zap.ZapGetMethod method =
                (org.zaproxy.zap.ZapGetMethod) msg.getUserObject();
        keepSocketOpen = notifyPersistentConnectionListener(msg, inSocket, method);
        if (keepSocketOpen) {
            // do not wait for close
            break;
        }
    } while (!isConnectionClose(msg) && !inSocket.isClosed());
}
/**
 * Decodes the response body in place (e.g. gzip) when it has content encodings and no decoding
 * errors, clearing the Content-Encoding header and fixing Content-Length when present.
 */
static void decodeResponseIfNeeded(HttpMessage msg) {
    HttpBody responseBody = msg.getResponseBody();
    boolean decodable =
            !responseBody.getContentEncodings().isEmpty()
                    && !responseBody.hasContentEncodingErrors();
    if (decodable) {
        responseBody.setBody(responseBody.getContent());
        responseBody.setContentEncodings(Collections.emptyList());
        HttpHeader responseHeader = msg.getResponseHeader();
        responseHeader.setHeader(HttpHeader.CONTENT_ENCODING, null);
        if (responseHeader.getHeader(HttpHeader.CONTENT_LENGTH) != null) {
            responseHeader.setContentLength(responseBody.length());
        }
    }
}
/**
 * Tells whether the connection should be closed after {@code msg}: missing/empty response,
 * an explicit close requested by either side, or a non-empty body without Content-Length
 * (closing is then the only way the client can detect the end of the body).
 */
private boolean isConnectionClose(HttpMessage msg) {
    if (msg == null || msg.getResponseHeader().isEmpty()) {
        return true;
    }
    boolean explicitClose =
            msg.getRequestHeader().isConnectionClose()
                    || msg.getResponseHeader().isConnectionClose();
    boolean unknownBodyLength =
            msg.getResponseHeader().getContentLength() == -1
                    && msg.getResponseBody().length() > 0;
    return explicitClose || unknownBodyLength;
}
/**
 * Closes the client streams, the socket, and shuts down the sender. Each close failure is only
 * debug-logged so that every resource still gets a close attempt (deliberate best-effort).
 */
protected void disconnect() {
    try {
        if (httpIn != null) {
            httpIn.close();
        }
    } catch (Exception e) {
        log.debug(e.getMessage(), e);
    }
    try {
        if (httpOut != null) {
            httpOut.close();
        }
    } catch (Exception e) {
        log.debug(e.getMessage(), e);
    }
    org.parosproxy.paros.network.HttpUtil.closeSocket(inSocket);
    if (httpSender != null) {
        httpSender.shutdown();
    }
}
/**
 * Notifies each {@code ProxyListener} of the request about to be forwarded. A listener
 * returning {@code false} vetoes the request; a listener throwing is logged and skipped.
 *
 * @param httpMessage the request that was received from the client and may be forwarded to the
 *     server
 * @return {@code true} if the message should be forwarded to the server, {@code false}
 *     otherwise
 */
private boolean notifyListenerRequestSend(HttpMessage httpMessage) {
    // Enhanced for, consistent with the other notify* methods in this class.
    for (ProxyListener listener : parentServer.getListenerList()) {
        try {
            if (!listener.onHttpRequestSend(httpMessage)) {
                return false;
            }
        } catch (Exception e) {
            log.error("An error occurred while notifying listener:", e);
        }
    }
    return true;
}
/**
 * Notifies each {@code ProxyListener} of the response about to be returned to the client. A
 * listener returning {@code false} vetoes the response; a listener throwing is logged and
 * skipped.
 *
 * @param httpMessage the response that was received from the server and may be forwarded to the
 *     client
 * @return {@code true} if the message should be forwarded to the client, {@code false}
 *     otherwise
 */
private boolean notifyListenerResponseReceive(HttpMessage httpMessage) {
    // Enhanced for, consistent with the other notify* methods in this class.
    for (ProxyListener listener : parentServer.getListenerList()) {
        try {
            if (!listener.onHttpResponseReceive(httpMessage)) {
                return false;
            }
        } catch (Exception e) {
            log.error("An error occurred while notifying listener:", e);
        }
    }
    return true;
}
/**
 * Offers the request to the override listeners; the first one that returns {@code true} takes
 * it over and the remaining listeners are not consulted.
 *
 * @return {@code true} when a listener overrode the request, {@code false} otherwise.
 */
private boolean notifyOverrideListenersRequestSend(HttpMessage httpMessage) {
    boolean overridden = false;
    for (OverrideMessageProxyListener listener :
            parentServer.getOverrideMessageProxyListeners()) {
        try {
            if (listener.onHttpRequestSend(httpMessage)) {
                overridden = true;
                break;
            }
        } catch (Exception e) {
            log.error("An error occurred while notifying listener:", e);
        }
    }
    return overridden;
}
/**
 * Offers the response to the override listeners; the first one that returns {@code true} takes
 * it over and the remaining listeners are not consulted.
 *
 * @return {@code true} when a listener overrode the response, {@code false} otherwise.
 */
private boolean notifyOverrideListenersResponseReceived(HttpMessage httpMessage) {
    boolean overridden = false;
    for (OverrideMessageProxyListener listener :
            parentServer.getOverrideMessageProxyListeners()) {
        try {
            if (listener.onHttpResponseReceived(httpMessage)) {
                overridden = true;
                break;
            }
        } catch (Exception e) {
            log.error("An error occurred while notifying listener:", e);
        }
    }
    return overridden;
}
/**
 * Go thru each listener and offer him to take over the connection. The first observer that
 * returns true gets exclusive rights; the remaining listeners are not consulted. A listener
 * throwing is logged and skipped.
 *
 * @param httpMessage Contains HTTP request & response.
 * @param inSocket Encapsulates the TCP connection to the browser.
 * @param method Provides more power to process response.
 * @return Boolean to indicate if socket should be kept open.
 */
private boolean notifyPersistentConnectionListener(
        HttpMessage httpMessage, Socket inSocket, org.zaproxy.zap.ZapGetMethod method) {
    // Enhanced for + early return, consistent with the other notify* methods in this class.
    for (PersistentConnectionListener listener :
            parentServer.getPersistentConnectionListenerList()) {
        try {
            if (listener.onHandshakeResponse(httpMessage, inSocket, method)) {
                // inform as long as one listener wishes to overtake the connection
                return true;
            }
        } catch (Exception e) {
            log.error("An error occurred while notifying listener:", e);
        }
    }
    return false;
}
/**
 * Tells whether or not the given {@code header} has a request to the (parent) proxy itself.
 *
 * <p>The request is to the proxy itself if the requested port is the one the proxy is bound to
 * and the requested domain is {@link API#API_DOMAIN} or the requested address is one of the
 * addresses the proxy is listening to. Any failure (e.g. name resolution) is logged and treated
 * as "not recursive".
 *
 * @param header the request that will be checked
 * @return {@code true} if it is a request to the proxy itself, {@code false} otherwise.
 * @see #isProxyAddress(InetAddress)
 */
private boolean isRecursive(HttpRequestHeader header) {
    try {
        if (header.getHostPort() != inSocket.getLocalPort()) {
            return false;
        }
        String targetDomain = header.getHostName();
        return API.API_DOMAIN.equals(targetDomain)
                || isProxyAddress(InetAddress.getByName(targetDomain));
    } catch (Exception e) {
        // ZAP: Log exceptions
        log.warn(e.getMessage(), e);
        return false;
    }
}
/**
 * Tells whether or not the given {@code address} is one of address(es) the (parent) proxy is
 * listening to.
 *
 * <p>When the proxy listens on any address, the check is whether {@code address} is a local
 * address or belongs to a network interface; otherwise it is compared with the socket's local
 * address.
 *
 * @param address the address that will be checked
 * @return {@code true} if it is one of the addresses the proxy is listening to, {@code false}
 *     otherwise.
 * @see #isLocalAddress(InetAddress)
 * @see #isNetworkInterfaceAddress(InetAddress)
 */
private boolean isProxyAddress(InetAddress address) {
    if (parentServer.getProxyParam().isProxyIpAnyLocalAddress()) {
        return isLocalAddress(address) || isNetworkInterfaceAddress(address);
    }
    return address.equals(inSocket.getLocalAddress());
}
/**
 * Tells whether or not the given {@code address} is a loopback, a site local, or any local
 * address.
 *
 * @param address the address that will be checked
 * @return {@code true} if the address is loopback, site local or any local address, {@code
 *     false} otherwise.
 * @see InetAddress#isLoopbackAddress()
 * @see InetAddress#isSiteLocalAddress()
 * @see InetAddress#isAnyLocalAddress()
 */
private static boolean isLocalAddress(InetAddress address) {
    if (address.isLoopbackAddress()) {
        return true;
    }
    if (address.isSiteLocalAddress()) {
        return true;
    }
    return address.isAnyLocalAddress();
}
/**
 * Tells whether or not the given {@code address} belongs to any of the network interfaces.
 * Lookup failures are logged and treated as "does not belong".
 *
 * @param address the address that will be checked
 * @return {@code true} if the address belongs to any of the network interfaces, {@code false}
 *     otherwise.
 * @see NetworkInterface#getByInetAddress(InetAddress)
 */
private static boolean isNetworkInterfaceAddress(InetAddress address) {
    try {
        return NetworkInterface.getByInetAddress(address) != null;
    } catch (SocketException e) {
        log.warn("Failed to check if an address is from a network interface:", e);
        return false;
    }
}
/**
 * Drops the Accept-Encoding request header when present. No encodings are supported in
 * practise (HttpResponseBody needs to support them, which it doesn't, yet), so none is offered
 * to the server.
 */
private void removeUnsupportedEncodings(HttpMessage msg) {
    HttpRequestHeader requestHeader = msg.getRequestHeader();
    if (requestHeader.getHeader(HttpHeader.ACCEPT_ENCODING) != null) {
        requestHeader.setHeader(HttpHeader.ACCEPT_ENCODING, null);
    }
}
/** Returns the {@code HttpSender} used to forward messages, creating a default one on first use. */
protected HttpSender getHttpSender() {
    if (httpSender == null) {
        httpSender = new HttpSender(connectionParam, true, HttpSender.PROXY_INITIATOR);
    }
    return httpSender;
}
/** Tells whether any proxy thread is currently servicing a connection. */
static boolean isAnyProxyThreadRunning() {
    return !proxyThreadList.isEmpty();
}
/**
 * Tries to answer {@code msg} from the server's cache instead of forwarding it.
 *
 * <p>Checks the first cache item directly, then the history-cache database. When a match is
 * found the cached response is copied into {@code msg} and written to the client.
 *
 * @return {@code true} when the message was handled from the cache, {@code false} when it
 *     should be forwarded to the server.
 * @throws IOException if writing the cached response to the client fails.
 */
protected boolean isProcessCache(HttpMessage msg) throws IOException {
    if (!parentServer.isEnableCacheProcessing()) {
        return false;
    }
    if (parentServer.getCacheProcessingList().isEmpty()) {
        return false;
    }
    CacheProcessingItem item = parentServer.getCacheProcessingList().get(0);
    if (msg.equals(item.message)) {
        HttpMessage newMsg = item.message.cloneAll();
        msg.setResponseHeader(newMsg.getResponseHeader());
        msg.setResponseBody(newMsg.getResponseBody());
        writeHttpResponse(msg, httpOut);
        return true;
    } else {
        try {
            RecordHistory history =
                    Model.getSingleton()
                            .getDb()
                            .getTableHistory()
                            .getHistoryCache(item.reference, msg);
            if (history == null) {
                return false;
            }
            msg.setResponseHeader(history.getHttpMessage().getResponseHeader());
            msg.setResponseBody(history.getHttpMessage().getResponseBody());
            writeHttpResponse(msg, httpOut);
            // System.out.println("cached:" +
            // msg.getRequestHeader().getURI().toString());
            return true;
        } catch (Exception e) {
            // NOTE(review): any DB/clone failure is reported as "handled" (true) without a
            // response being written — presumably deliberate best-effort; confirm before
            // changing.
            return true;
        }
    }
    // return false;
}
}
| zaproxy/zaproxy | zap/src/main/java/org/parosproxy/paros/core/proxy/ProxyThread.java |
1,448 | /*******************************************************************************
* ___ _ ____ ____
* / _ \ _ _ ___ ___| |_| _ \| __ )
* | | | | | | |/ _ \/ __| __| | | | _ \
* | |_| | |_| | __/\__ \ |_| |_| | |_) |
* \__\_\\__,_|\___||___/\__|____/|____/
*
* Copyright (c) 2014-2019 Appsicle
* Copyright (c) 2019-2024 QuestDB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package io.questdb.cutlass.http;
import io.questdb.cutlass.http.ex.RetryFailedOperationException;
import io.questdb.mp.*;
import io.questdb.network.*;
import io.questdb.std.Misc;
import io.questdb.std.Os;
import io.questdb.std.datetime.millitime.MillisecondClock;
import org.jetbrains.annotations.Nullable;
import java.io.Closeable;
import java.util.PriorityQueue;
public class WaitProcessor extends SynchronizedJob implements RescheduleContext, Closeable {
// Clock used for retry timing.
private final MillisecondClock clock;
// Multiplier applied when computing the exponential backoff wait.
private final double exponentialWaitMultiplier;
// Incoming retries: multi-producer queue fed by reschedule(), drained by this job.
private final Sequence inPubSequence;
private final RingQueue<RetryHolder> inQueue;
private final Sequence inSubSequence;
// Upper bound on the computed wait, in milliseconds.
private final long maxWaitCapMs;
// Retries ordered by their next-run time (see compareRetriesInQueue).
private final PriorityQueue<Retry> nextRerun;
private final IODispatcher<HttpConnectionContext> dispatcher;
// Outgoing retries: single-producer queue consumed by the http execution threads.
private final Sequence outPubSequence;
private final RingQueue<RetryHolder> outQueue;
private final Sequence outSubSequence;
public WaitProcessor(WaitProcessorConfiguration configuration, IODispatcher<HttpConnectionContext> dispatcher) {
clock = configuration.getClock();
maxWaitCapMs = configuration.getMaxWaitCapMs();
exponentialWaitMultiplier = configuration.getExponentialWaitMultiplier();
nextRerun = new PriorityQueue<>(configuration.getInitialWaitQueueSize(), WaitProcessor::compareRetriesInQueue);
this.dispatcher = dispatcher;
int retryQueueLength = configuration.getMaxProcessingQueueSize();
inQueue = new RingQueue<>(RetryHolder::new, retryQueueLength);
inPubSequence = new MPSequence(retryQueueLength);
inSubSequence = new SCSequence();
outQueue = new RingQueue<>(RetryHolder::new, retryQueueLength);
outPubSequence = new SPSequence(retryQueueLength);
outSubSequence = new MCSequence(retryQueueLength);
inPubSequence.then(inSubSequence).then(inPubSequence);
outPubSequence.then(outSubSequence).then(outPubSequence);
}
@Override
public void close() {
processInQueue(); // Process incoming queue to close all contexts
for (int i = 0, n = nextRerun.size(); i < n; i++) {
Misc.free(nextRerun.poll());
}
}
@Override
// This supposed to run in http execution thread / job
public void reschedule(Retry retry) {
reschedule(retry, 0, 0);
}
// This hijacks http execution thread / job and runs retries in it.
public boolean runReruns(HttpRequestProcessorSelector selector) {
boolean useful = false;
while (true) {
Retry retry = getNextRerun();
if (retry != null) {
useful = true;
run(selector, retry);
} else {
return useful;
}
}
}
private void run(HttpRequestProcessorSelector selector, Retry retry) {
try {
if (!retry.tryRerun(selector, this)) {
try {
reschedule(retry, retry.getAttemptDetails().attempt + 1, retry.getAttemptDetails().waitStartTimestamp);
} catch (RetryFailedOperationException e) {
retry.fail(selector, e);
}
}
} catch (PeerDisconnectedException e) {
HttpConnectionContext context = (HttpConnectionContext) retry;
dispatcher.disconnect((HttpConnectionContext) retry, IODispatcher.DISCONNECT_REASON_KICKED_OUT_AT_RECV);
} catch (PeerIsSlowToReadException e) {
HttpConnectionContext context = (HttpConnectionContext) retry;
dispatcher.registerChannel(context, IOOperation.WRITE);
} catch (PeerIsSlowToWriteException e) {
HttpConnectionContext context = (HttpConnectionContext) retry;
dispatcher.registerChannel(context, IOOperation.READ);
} catch (ServerDisconnectException e) {
HttpConnectionContext context = (HttpConnectionContext) retry;
dispatcher.disconnect((HttpConnectionContext) retry, context.getDisconnectReason());
}
}
@Override
public boolean runSerially() {
return processInQueue() || sendToOutQueue();
}
private static int compareRetriesInQueue(Retry r1, Retry r2) {
// r1, r2 are always not null, null retries are not queued
RetryAttemptAttributes a1 = r1.getAttemptDetails();
RetryAttemptAttributes a2 = r2.getAttemptDetails();
return Long.compare(a1.nextRunTimestamp, a2.nextRunTimestamp);
}
private long calculateNextTimestamp(RetryAttemptAttributes attemptAttributes) {
if (attemptAttributes.attempt == 0) {
// First retry after fixed time of 2ms
return attemptAttributes.lastRunTimestamp + 2;
}
// 'exponentialWaitMultiplier' times wait time starting until it is 'maxWaitCapMs' sec
long totalWait = attemptAttributes.lastRunTimestamp - attemptAttributes.waitStartTimestamp;
return Math.min(maxWaitCapMs, Math.max(4L, (long) (totalWait * exponentialWaitMultiplier))) + attemptAttributes.lastRunTimestamp;
}
private @Nullable Retry getNextRerun() {
long cursor = outSubSequence.next();
// -2 = there was a contest for queue index and this thread has lost
if (cursor < 0) {
return null;
}
RetryHolder retryHolder = outQueue.get(cursor);
Retry r = retryHolder.retry;
retryHolder.retry = null;
outSubSequence.done(cursor);
return r;
}
// Process incoming queue and put it on priority queue with next timestamp to rerun
private boolean processInQueue() {
boolean any = false;
while (true) {
long cursor = inSubSequence.next();
// -2 = there was a contest for queue index and this thread has lost
if (cursor < -1) {
Os.pause();
continue;
}
// -1 = queue is empty. All done.
if (cursor < 0) {
return any;
}
Retry retry;
try {
RetryHolder toRun = inQueue.get(cursor);
retry = toRun.retry;
toRun.retry = null;
} finally {
inSubSequence.done(cursor);
}
retry.getAttemptDetails().nextRunTimestamp = calculateNextTimestamp(retry.getAttemptDetails());
nextRerun.add(retry);
any = true;
}
}
private void reschedule(Retry retry, int attempt, long waitStartMs) {
long now = clock.getTicks();
retry.getAttemptDetails().attempt = attempt;
retry.getAttemptDetails().lastRunTimestamp = now;
retry.getAttemptDetails().waitStartTimestamp = attempt == 0 ? now : waitStartMs;
while (true) {
long cursor = inPubSequence.next();
// -2 = there was a contest for queue index and this thread has lost
if (cursor < -1) {
Os.pause();
continue;
}
// -1 = queue is full. It means there are already too many retries waiting
// Send error to client.
if (cursor < 0) {
throw RetryFailedOperationException.INSTANCE;
}
RetryHolder retryHolder = inQueue.get(cursor);
retryHolder.retry = retry;
inPubSequence.done(cursor);
return;
}
}
private boolean sendToOutQueue() {
boolean useful = false;
final long now = clock.getTicks();
while (nextRerun.size() > 0) {
Retry next = nextRerun.peek();
if (next.getAttemptDetails().nextRunTimestamp <= now) {
useful = true;
Retry retry = nextRerun.poll();
if (!sendToOutQueue(retry)) {
nextRerun.add(retry);
return true;
}
} else {
// All reruns are in the future.
return useful;
}
}
return useful;
}
private boolean sendToOutQueue(Retry retry) {
while (true) {
long cursor = outPubSequence.next();
// -2 = there was a contest for queue index and this thread has lost
if (cursor < -1) {
Os.pause();
continue;
}
if (cursor < 0) {
// Cannot put to out queue. It is full. Release job and retry next run.
return false;
}
RetryHolder retryHolderOut = outQueue.get(cursor);
retryHolderOut.retry = retry;
outPubSequence.done(cursor);
return true;
}
}
}
| questdb/questdb | core/src/main/java/io/questdb/cutlass/http/WaitProcessor.java |
1,449 | /*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package androidx.appcompat.widget;
import static androidx.annotation.RestrictTo.Scope.LIBRARY_GROUP_PREFIX;
import android.content.Context;
import android.content.res.Configuration;
import android.content.res.Resources;
import android.os.Build;
import android.transition.Transition;
import android.util.AttributeSet;
import android.util.Log;
import android.view.KeyEvent;
import android.view.MenuItem;
import android.view.MotionEvent;
import android.view.View;
import android.widget.HeaderViewListAdapter;
import android.widget.ListAdapter;
import android.widget.PopupWindow;
import androidx.annotation.DoNotInline;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RequiresApi;
import androidx.annotation.RestrictTo;
import androidx.appcompat.view.menu.ListMenuItemView;
import androidx.appcompat.view.menu.MenuAdapter;
import androidx.appcompat.view.menu.MenuBuilder;
import java.lang.reflect.Method;
/**
* A MenuPopupWindow represents the popup window for menu.
*
* MenuPopupWindow is mostly same as ListPopupWindow, but it has customized
* behaviors specific to menus,
*
*/
@RestrictTo(LIBRARY_GROUP_PREFIX)
public class MenuPopupWindow extends ListPopupWindow implements MenuItemHoverListener {
    private static final String TAG = "MenuPopupWindow";

    // Reflective handle to the hidden PopupWindow#setTouchModal(boolean).
    // Only resolved on API <= 28, where the method is not public; stays null
    // if the lookup fails, in which case setTouchModal() is a no-op there.
    private static Method sSetTouchModalMethod;
    static {
        try {
            if (Build.VERSION.SDK_INT <= 28) {
                sSetTouchModalMethod = PopupWindow.class.getDeclaredMethod(
                        "setTouchModal", boolean.class);
            }
        } catch (NoSuchMethodException e) {
            Log.i(TAG, "Could not find method setTouchModal() on PopupWindow. Oh well.");
        }
    }

    private MenuItemHoverListener mHoverListener;

    public MenuPopupWindow(@NonNull Context context, @Nullable AttributeSet attrs, int defStyleAttr,
            int defStyleRes) {
        super(context, attrs, defStyleAttr, defStyleRes);
    }

    // Uses the menu-specific list view so item hover events are forwarded
    // back to this popup window.
    @NonNull
    @Override
    DropDownListView createDropDownListView(Context context, boolean hijackFocus) {
        MenuDropDownListView view = new MenuDropDownListView(context, hijackFocus);
        view.setHoverListener(this);
        return view;
    }

    // Parameter is Object (not Transition) so callers compiled against older
    // API levels can pass the value through without referencing the class.
    public void setEnterTransition(Object enterTransition) {
        if (Build.VERSION.SDK_INT >= 23) {
            Api23Impl.setEnterTransition(mPopup, (Transition) enterTransition);
        }
    }

    public void setExitTransition(Object exitTransition) {
        if (Build.VERSION.SDK_INT >= 23) {
            Api23Impl.setExitTransition(mPopup, (Transition) exitTransition);
        }
    }

    public void setHoverListener(MenuItemHoverListener hoverListener) {
        mHoverListener = hoverListener;
    }

    /**
     * Set whether this window is touch modal or if outside touches will be sent to
     * other windows behind it.
     */
    public void setTouchModal(final boolean touchModal) {
        if (Build.VERSION.SDK_INT <= 28) {
            // No public API before 29; fall back to reflection, silently
            // doing nothing if the hidden method is unavailable.
            if (sSetTouchModalMethod != null) {
                try {
                    sSetTouchModalMethod.invoke(mPopup, touchModal);
                } catch (Exception e) {
                    Log.i(TAG, "Could not invoke setTouchModal() on PopupWindow. Oh well.");
                }
            }
        } else {
            Api29Impl.setTouchModal(mPopup, touchModal);
        }
    }

    @Override
    public void onItemHoverEnter(@NonNull MenuBuilder menu, @NonNull MenuItem item) {
        // Forward up the chain
        if (mHoverListener != null) {
            mHoverListener.onItemHoverEnter(menu, item);
        }
    }

    @Override
    public void onItemHoverExit(@NonNull MenuBuilder menu, @NonNull MenuItem item) {
        // Forward up the chain
        if (mHoverListener != null) {
            mHoverListener.onItemHoverExit(menu, item);
        }
    }

    /**
     * Drop-down list view that adds menu-style keyboard navigation
     * (advance/retreat into submenus) and hover-change notifications.
     */
    @RestrictTo(LIBRARY_GROUP_PREFIX)
    public static class MenuDropDownListView extends DropDownListView {
        // Key codes for entering a submenu / backing out, swapped for RTL layouts.
        final int mAdvanceKey;
        final int mRetreatKey;

        private MenuItemHoverListener mHoverListener;
        private MenuItem mHoveredMenuItem;

        public MenuDropDownListView(Context context, boolean hijackFocus) {
            super(context, hijackFocus);

            final Resources res = context.getResources();
            final Configuration config = res.getConfiguration();
            if (View.LAYOUT_DIRECTION_RTL == config.getLayoutDirection()) {
                mAdvanceKey = KeyEvent.KEYCODE_DPAD_LEFT;
                mRetreatKey = KeyEvent.KEYCODE_DPAD_RIGHT;
            } else {
                mAdvanceKey = KeyEvent.KEYCODE_DPAD_RIGHT;
                mRetreatKey = KeyEvent.KEYCODE_DPAD_LEFT;
            }
        }

        public void setHoverListener(MenuItemHoverListener hoverListener) {
            mHoverListener = hoverListener;
        }

        public void clearSelection() {
            setSelection(INVALID_POSITION);
        }

        @Override
        public boolean onKeyDown(int keyCode, KeyEvent event) {
            ListMenuItemView selectedItem = (ListMenuItemView) getSelectedView();
            if (selectedItem != null && keyCode == mAdvanceKey) {
                // Advance key opens the submenu of the selected item (if any)
                // by simulating a click on it.
                if (selectedItem.isEnabled() && selectedItem.getItemData().hasSubMenu()) {
                    performItemClick(
                            selectedItem,
                            getSelectedItemPosition(),
                            getSelectedItemId());
                }
                return true;
            } else if (selectedItem != null && keyCode == mRetreatKey) {
                setSelection(INVALID_POSITION);

                // Close only the top-level menu.
                final ListAdapter adapter = getAdapter();
                final MenuAdapter menuAdapter;
                if (adapter instanceof HeaderViewListAdapter) {
                    // The adapter may be wrapped when headers are present.
                    menuAdapter =
                            (MenuAdapter) ((HeaderViewListAdapter) adapter).getWrappedAdapter();
                } else {
                    menuAdapter = (MenuAdapter) adapter;
                }
                menuAdapter.getAdapterMenu().close(false /* closeAllMenus */);
                return true;
            }
            return super.onKeyDown(keyCode, event);
        }

        @Override
        public boolean onHoverEvent(MotionEvent ev) {
            // Dispatch any changes in hovered item index to the listener.
            if (mHoverListener != null) {
                // The adapter may be wrapped. Adjust the index if necessary.
                final int headersCount;
                final MenuAdapter menuAdapter;

                final ListAdapter adapter = getAdapter();
                if (adapter instanceof HeaderViewListAdapter) {
                    final HeaderViewListAdapter headerAdapter = (HeaderViewListAdapter) adapter;
                    headersCount = headerAdapter.getHeadersCount();
                    menuAdapter = (MenuAdapter) headerAdapter.getWrappedAdapter();
                } else {
                    headersCount = 0;
                    menuAdapter = (MenuAdapter) adapter;
                }

                // Find the menu item for the view at the event coordinates.
                MenuItem menuItem = null;
                if (ev.getAction() != MotionEvent.ACTION_HOVER_EXIT) {
                    final int position = pointToPosition((int) ev.getX(), (int) ev.getY());
                    if (position != INVALID_POSITION) {
                        final int itemPosition = position - headersCount;
                        if (itemPosition >= 0 && itemPosition < menuAdapter.getCount()) {
                            menuItem = menuAdapter.getItem(itemPosition);
                        }
                    }
                }

                // Fire exit for the previously hovered item before entering the new one.
                final MenuItem oldMenuItem = mHoveredMenuItem;
                if (oldMenuItem != menuItem) {
                    final MenuBuilder menu = menuAdapter.getAdapterMenu();
                    if (oldMenuItem != null) {
                        mHoverListener.onItemHoverExit(menu, oldMenuItem);
                    }

                    mHoveredMenuItem = menuItem;

                    if (menuItem != null) {
                        mHoverListener.onItemHoverEnter(menu, menuItem);
                    }
                }
            }

            return super.onHoverEvent(ev);
        }
    }

    // Out-of-line holder for API 23+ calls so the verifier can't accidentally
    // inline them on older devices (standard @DoNotInline pattern).
    @RequiresApi(23)
    static class Api23Impl {
        private Api23Impl() {
            // This class is not instantiable.
        }

        @DoNotInline
        static void setEnterTransition(PopupWindow popupWindow, Transition enterTransition) {
            popupWindow.setEnterTransition(enterTransition);
        }

        @DoNotInline
        static void setExitTransition(PopupWindow popupWindow, Transition exitTransition) {
            popupWindow.setExitTransition(exitTransition);
        }
    }

    @RequiresApi(29)
    static class Api29Impl {
        private Api29Impl() {
            // This class is not instantiable.
        }

        @DoNotInline
        static void setTouchModal(PopupWindow popupWindow, boolean touchModal) {
            popupWindow.setTouchModal(touchModal);
        }
    }
}
1,450 | /* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved.
* Copyright (C) 2006 Constantin Kaplinsky. All Rights Reserved.
* Copyright (C) 2009 Paul Donohue. All Rights Reserved.
* Copyright (C) 2010, 2012-2013 D. R. Commander. All Rights Reserved.
* Copyright (C) 2011-2019 Brian P. Hinz
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
* USA.
*/
//
// DesktopWindow is an AWT Canvas representing a VNC desktop.
//
// Methods on DesktopWindow are called from both the GUI thread and the thread
// which processes incoming RFB messages ("the RFB thread"). This means we
// need to be careful with synchronization here.
//
package com.tigervnc.vncviewer;
import java.awt.*;
import java.awt.event.*;
import java.awt.geom.AffineTransform;
import java.awt.image.*;
import java.nio.*;
import java.util.*;
import javax.swing.*;
import javax.imageio.*;
import java.io.*;
import com.tigervnc.rfb.*;
import com.tigervnc.rfb.Cursor;
import com.tigervnc.rfb.Exception;
import com.tigervnc.rfb.Point;
import static java.awt.event.KeyEvent.*;
import static com.tigervnc.vncviewer.Parameters.*;
import static com.tigervnc.rfb.Keysymdef.*;
class Viewport extends JPanel implements ActionListener {
static LogWriter vlog = new LogWriter("Viewport");

// Identifiers for the entries of the right-click context menu.
enum ID { EXIT, FULLSCREEN, MINIMIZE, RESIZE, NEWVIEWER,
          CTRL, ALT, MENUKEY, CTRLALTDEL, CLIPBOARD,
          REFRESH, OPTIONS, INFO, ABOUT, DISMISS }

// Presentation attributes for context-menu entries (toggles, dividers,
// submenus, etc.).
enum MENU { INACTIVE, TOGGLE, VALUE, RADIO,
            INVISIBLE, SUBMENU_POINTER, SUBMENU, DIVIDER }
// Builds the viewport for a w x h remote desktop with the server's pixel
// format, wires up all input listeners, and hands the freshly created
// framebuffer to the connection object.
public Viewport(int w, int h, PixelFormat serverPF, CConn cc_)
{
  cc = cc_;
  setScaledSize(w, h);
  frameBuffer = createFramebuffer(serverPF, w, h);
  assert(frameBuffer != null);
  setBackground(Color.BLACK);
  cc.setFramebuffer(frameBuffer);

  contextMenu = new JPopupMenu();

  OptionsDialog.addCallback("handleOptions", this);

  // All mouse activity funnels into handle(MouseEvent); clicks are ignored
  // because press/release are already forwarded individually.
  addMouseListener(new MouseAdapter() {
    public void mouseClicked(MouseEvent e) { }
    public void mouseEntered(MouseEvent e) { handle(e); }
    public void mouseExited(MouseEvent e) { handle(e); }
    public void mouseReleased(MouseEvent e) { handle(e); }
    public void mousePressed(MouseEvent e) { handle(e); }
  });
  addMouseWheelListener(new MouseAdapter() {
    public void mouseWheelMoved(MouseWheelEvent e) { handle(e); }
  });
  addMouseMotionListener(new MouseMotionAdapter() {
    public void mouseDragged(MouseEvent e) { handle(e); }
    public void mouseMoved(MouseEvent e) { handle(e); }
  });
  // Key events go through handleSystemEvent; keyTyped is ignored since the
  // RFB protocol works with press/release pairs, not typed characters.
  addKeyListener(new KeyAdapter() {
    public void keyTyped(KeyEvent e) { }
    public void keyPressed(KeyEvent e) { handleSystemEvent(e); }
    public void keyReleased(KeyEvent e) { handleSystemEvent(e); }
  });
  addFocusListener(new FocusAdapter() {
    public void focusGained(FocusEvent e) {
      ClipboardDialog.clientCutText();
    }
    public void focusLost(FocusEvent e) {
      // Avoid stuck modifiers when the window loses focus mid-chord.
      releaseDownKeys();
    }
  });

  // Override default key bindings from L&F
  getActionMap().put("null", new AbstractAction() {
    public void actionPerformed(ActionEvent e) { }
  });
  ArrayList<KeyStroke> keys = new ArrayList<KeyStroke>();
  keys.add(KeyStroke.getKeyStroke(KeyEvent.VK_F10, 0, true));
  keys.add(KeyStroke.getKeyStroke(KeyEvent.VK_ALT, 0, true));
  for (int i=0; i<keys.size(); i++)
    getInputMap(JComponent.WHEN_FOCUSED).put(keys.get(i), "null");

  setFocusTraversalKeysEnabled(false);
  setFocusable(true);

  setMenuKey();

  // Send a fake pointer event so that the server will stop rendering
  // a server-side cursor. Ideally we'd like to send the actual pointer
  // position, but we can't really tell when the window manager is done
  // placing us so we don't have a good time for that.
  handlePointerEvent(new Point(w/2, h/2), 0);
}
// Most efficient format (from Viewport's point of view): the framebuffer's
// own pixel format avoids any per-update conversion on our side.
public PixelFormat getPreferredPF()
{
  return frameBuffer.getPF();
}
// Copy the areas of the framebuffer that have been changed (damaged)
// to the displayed window.
public void updateWindow() {
  Rect r = frameBuffer.getDamage();
  if (!r.is_empty()) {
    if (cc.server.width() != scaledWidth ||
        cc.server.height() != scaledHeight) {
      // Scaling is active: transform the damage rect into on-screen
      // coordinates so the repaint covers the whole affected area.
      AffineTransform t = new AffineTransform();
      t.scale((double)scaleRatioX, (double)scaleRatioY);
      Rectangle s = new Rectangle(r.tl.x, r.tl.y, r.width(), r.height());
      s = t.createTransformedShape(s).getBounds();
      paintImmediately(s.x, s.y, s.width, s.height);
    } else {
      paintImmediately(r.tl.x, r.tl.y, r.width(), r.height());
    }
  }
}
// 5x5 ARGB pixel data for the fallback "dot" cursor: an opaque black 3x3
// square centered in a transparent 5x5 image (used when the server sends an
// entirely transparent cursor and dotWhenNoCursor is enabled).
static final int[] dotcursor_xpm = {
  0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  0x00000000, 0xff000000, 0xff000000, 0xff000000, 0x00000000,
  0x00000000, 0xff000000, 0xff000000, 0xff000000, 0x00000000,
  0x00000000, 0xff000000, 0xff000000, 0xff000000, 0x00000000,
  0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
// Installs the server-supplied cursor image. `data` is RGBA, 4 bytes per
// pixel. A fully transparent cursor is replaced by a small dot (if the
// dotWhenNoCursor parameter is set); otherwise the image is scaled to the
// current view scale and clamped/padded to the toolkit's best cursor size.
public void setCursor(int width, int height, Point hotspot,
                      byte[] data)
{
  int i;

  if (cursor != null)
    cursor.flush();

  // Scan the alpha byte of each RGBA quad for any non-transparent pixel.
  for (i = 0; i < width*height; i++)
    if (data[i*4 + 3] != 0) break;

  if ((i == width*height) && dotWhenNoCursor.getValue()) {
    vlog.debug("cursor is empty - using dot");
    cursor = new BufferedImage(5, 5, BufferedImage.TYPE_INT_ARGB_PRE);
    cursor.setRGB(0, 0, 5, 5, dotcursor_xpm, 0, 5);
    // NOTE(review): hotspot 3 is off-center for the 5x5 dot (center index
    // is 2) — presumably intentional/1-based; confirm before changing.
    cursorHotspot.x = cursorHotspot.y = 3;
  } else {
    if ((width == 0) || (height == 0)) {
      // Zero-sized cursor: use an empty image of the toolkit's minimum size.
      cursor = new BufferedImage(tk.getBestCursorSize(0, 0).width,
                                 tk.getBestCursorSize(0, 0).height,
                                 BufferedImage.TYPE_INT_ARGB_PRE);
      cursorHotspot.x = cursorHotspot.y = 0;
    } else {
      IntBuffer buffer = IntBuffer.allocate(width*height);
      buffer.put(ByteBuffer.wrap(data).asIntBuffer());
      cursor =
        new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB_PRE);
      cursor.setRGB(0, 0, width, height, buffer.array(), 0, width);
      cursorHotspot = hotspot;
    }
  }

  // Apply the viewport's scale factor, then rescale to the toolkit's
  // preferred cursor size, keeping the hotspot inside the image.
  int cw = (int) Math.floor((float) cursor.getWidth() * scaleRatioX);
  int ch = (int) Math.floor((float) cursor.getHeight() * scaleRatioY);
  int x = cursorHotspot.x;
  int y = cursorHotspot.y;
  Dimension cs = tk.getBestCursorSize(cw, ch);
  if (cs.width != cursor.getWidth() || cs.height != cursor.getHeight()) {
    cw = VncViewer.os.startsWith("windows") ? Math.min(cw, cs.width) : cs.width;
    ch = VncViewer.os.startsWith("windows") ? Math.min(ch, cs.height) : cs.height;
    BufferedImage tmp = new BufferedImage(cs.width, cs.height, BufferedImage.TYPE_INT_ARGB_PRE);
    Graphics2D g2 = tmp.createGraphics();
    g2.drawImage(cursor, 0, 0, cw, ch, 0, 0, width, height, null);
    g2.dispose();
    x = (int) Math.min(Math.floor((float) x * (float) cw / (float) width), cw - 1);
    y = (int) Math.min(Math.floor((float) y * (float) ch / (float) height), ch - 1);
    cursor = tmp;
  }
  setCursor(cursor, x, y);
}
// Wraps the given image and hotspot into an AWT custom cursor and makes it
// the component's current cursor.
private void setCursor(Image img, int x, int y)
{
  setCursor(tk.createCustomCursor(img, new java.awt.Point(x, y), "rfb cursor"));
}
// Adopts a new server desktop size: the framebuffer is reallocated only when
// the pixel dimensions actually change, and the scaled on-screen size is
// always recomputed. (x/y are part of the resize protocol but unused here.)
public void resize(int x, int y, int w, int h) {
  if ((w != frameBuffer.width()) || (h != frameBuffer.height())) {
    vlog.debug("Resizing framebuffer from "+frameBuffer.width()+"x"+
               frameBuffer.height()+" to "+w+"x"+h);
    frameBuffer = createFramebuffer(frameBuffer.getPF(), w, h);
    assert(frameBuffer != null);
    cc.setFramebuffer(frameBuffer);
  }
  setScaledSize(w, h);
}
// Translates an AWT mouse event into an RFB pointer event. Returns 1 when
// the event was consumed, -1 for event types we don't handle.
public int handle(MouseEvent e)
{
  int buttonMask, wheelMask;
  switch (e.getID()) {
  case MouseEvent.MOUSE_ENTERED:
    // Re-install the remote cursor when the pointer enters the canvas.
    if (cursor != null)
      setCursor(cursor, cursorHotspot.x, cursorHotspot.y);
    return 1;
  case MouseEvent.MOUSE_EXITED:
    setCursor(java.awt.Cursor.getDefaultCursor());
    return 1;
  case MouseEvent.MOUSE_PRESSED:
  case MouseEvent.MOUSE_RELEASED:
  case MouseEvent.MOUSE_DRAGGED:
  case MouseEvent.MOUSE_MOVED:
  case MouseEvent.MOUSE_WHEEL:
    // RFB button mask: bit 0 = left, bit 1 = middle, bit 2 = right.
    buttonMask = 0;
    if ((e.getModifiersEx() & MouseEvent.BUTTON1_DOWN_MASK) != 0)
      buttonMask |= 1;
    if ((e.getModifiersEx() & MouseEvent.BUTTON2_DOWN_MASK) != 0)
      buttonMask |= 2;
    if ((e.getModifiersEx() & MouseEvent.BUTTON3_DOWN_MASK) != 0)
      buttonMask |= 4;

    if (e.getID() == MouseEvent.MOUSE_WHEEL) {
      // Wheel is reported as press+release of buttons 4/5 (or 6/7 when
      // shifted, for horizontal scroll), once per click of rotation.
      wheelMask = 0;
      int clicks = ((MouseWheelEvent)e).getWheelRotation();
      if (clicks < 0)
        wheelMask |= e.isShiftDown() ? 32 : 8;
      else
        wheelMask |= e.isShiftDown() ? 64 : 16;
      Point pt = new Point(e.getX(), e.getY());
      for (int i = 0; i < Math.abs(clicks); i++) {
        handlePointerEvent(pt, buttonMask|wheelMask);
        handlePointerEvent(pt, buttonMask);
      }
      return 1;
    }

    handlePointerEvent(new Point(e.getX(), e.getY()), buttonMask);
    return 1;
  }
  return -1;
}
// Allocates a new w x h framebuffer for this viewport.
// NOTE: the requested PixelFormat parameter is currently not consulted;
// JavaPixelBuffer is always constructed from the dimensions alone.
private PlatformPixelBuffer createFramebuffer(PixelFormat pf, int w, int h)
{
  return new JavaPixelBuffer(w, h);
}
//
// Callback methods to determine geometry of our Component.
//

// All three sizes are pinned to the current scaled framebuffer size so the
// layout manager never stretches or shrinks the canvas.
public Dimension getPreferredSize() {
  return new Dimension(scaledWidth, scaledHeight);
}

public Dimension getMinimumSize() {
  return new Dimension(scaledWidth, scaledHeight);
}

public Dimension getMaximumSize() {
  return new Dimension(scaledWidth, scaledHeight);
}
// Paints the framebuffer image, scaling it when the on-screen size differs
// from the server size. Synchronizes on the image because the RFB thread
// writes into it concurrently.
public void paintComponent(Graphics g) {
  Graphics2D g2 = (Graphics2D)g;
  synchronized(frameBuffer.getImage()) {
    if (cc.server.width() != scaledWidth ||
        cc.server.height() != scaledHeight) {
      // Favor quality over speed when scaling the desktop image.
      g2.setRenderingHint(RenderingHints.KEY_RENDERING,
                          RenderingHints.VALUE_RENDER_QUALITY);
      g2.drawImage(frameBuffer.getImage(), 0, 0,
                   scaledWidth, scaledHeight, null);
    } else {
      g2.drawImage(frameBuffer.getImage(), 0, 0, null);
    }
  }
  g2.dispose();
}
// Recomputes the viewport's on-screen (scaled) size for a width x height
// framebuffer, honoring the "ScalingFactor" parameter:
//   - an all-digit string is interpreted as a percentage,
//   - "Auto" uses the given size as-is,
//   - anything else scales to fit while preserving the server aspect ratio.
// When remote resizing is enabled, scaling is forced to 1:1.
// Side effects: updates scaledWidth/scaledHeight/scaleRatioX/scaleRatioY and
// resizes the component if its size changed.
public void setScaledSize(int width, int height)
{
  assert(width != 0 && height != 0);
  String scaleString = scalingFactor.getValue();
  if (remoteResize.getValue()) {
    scaledWidth = width;
    scaledHeight = height;
    scaleRatioX = 1.00f;
    scaleRatioY = 1.00f;
  } else {
    if (scaleString.matches("^[0-9]+$")) {
      // Renamed from "scalingFactor": the old local shadowed the statically
      // imported Parameters.scalingFactor read a few lines above.
      int percent = Integer.parseInt(scaleString);
      scaledWidth =
        (int)Math.floor((float)width * (float)percent/100.0);
      scaledHeight =
        (int)Math.floor((float)height * (float)percent/100.0);
    } else if (scaleString.equalsIgnoreCase("Auto")) {
      scaledWidth = width;
      scaledHeight = height;
    } else {
      // Fixed-ratio mode: largest size that fits both dimensions.
      float widthRatio = (float)width / (float)cc.server.width();
      float heightRatio = (float)height / (float)cc.server.height();
      float ratio = Math.min(widthRatio, heightRatio);
      scaledWidth = (int)Math.floor(cc.server.width() * ratio);
      scaledHeight = (int)Math.floor(cc.server.height() * ratio);
    }
    scaleRatioX = (float)scaledWidth / (float)cc.server.width();
    scaleRatioY = (float)scaledHeight / (float)cc.server.height();
  }
  if (scaledWidth != getWidth() || scaledHeight != getHeight())
    setSize(new Dimension(scaledWidth, scaledHeight));
}
// Sends a pointer event to the server, converting on-screen coordinates back
// to server coordinates when scaling is active. Duplicate events (same
// position and buttons as last time) are suppressed; nothing is sent in
// view-only mode.
private void handlePointerEvent(Point pos, int buttonMask)
{
  if (!viewOnly.getValue()) {
    if (buttonMask != lastButtonMask || !pos.equals(lastPointerPos)) {
      try {
        if (cc.server.width() != scaledWidth ||
            cc.server.height() != scaledHeight) {
          // Undo the display scale so the server sees framebuffer coords.
          int sx = (scaleRatioX == 1.00) ?
            pos.x : (int)Math.floor(pos.x / scaleRatioX);
          int sy = (scaleRatioY == 1.00) ?
            pos.y : (int)Math.floor(pos.y / scaleRatioY);
          pos = pos.translate(new Point(sx - pos.x, sy - pos.y));
        }
        cc.writer().writePointerEvent(pos, buttonMask);
      } catch (Exception e) {
        vlog.error("%s", e.getMessage());
        cc.close();
      }
    }
    lastPointerPos = pos;
    lastButtonMask = buttonMask;
  }
}
// Sends a key press to the server. keyCode is our (possibly synthesized)
// tracking code, keySym the X11 keysym to transmit. Handles the menu
// activation key, per-OS modifier quirks, and records the pressed symbol so
// the matching release can send the same keysym.
public void handleKeyPress(long keyCode, int keySym)
{
  // Prevent recursion if the menu wants to send its own
  // activation key.
  if ((menuKeySym != 0) && keySym == menuKeySym && !menuRecursion) {
    popupContextMenu();
    return;
  }

  if (viewOnly.getValue())
    return;

  if (keyCode == 0) {
    vlog.error("No key code specified on key press");
    return;
  }

  if (VncViewer.os.startsWith("mac os x")) {
    // Alt on OS X behaves more like AltGr on other systems, and to get
    // sane behaviour we should translate things in that manner for the
    // remote VNC server. However that means we lose the ability to use
    // Alt as a shortcut modifier. Do what RealVNC does and hijack the
    // left command key as an Alt replacement.
    switch (keySym) {
    case XK_Meta_L:
      keySym = XK_Alt_L;
      break;
    case XK_Meta_R:
      keySym = XK_Super_L;
      break;
    case XK_Alt_L:
      keySym = XK_Mode_switch;
      break;
    case XK_Alt_R:
      keySym = XK_ISO_Level3_Shift;
      break;
    }
  }

  if (VncViewer.os.startsWith("windows")) {
    // Ugly hack alert!
    //
    // Windows doesn't have a proper AltGr, but handles it using fake
    // Ctrl+Alt. Unfortunately X11 doesn't generally like the combination
    // Ctrl+Alt+AltGr, which we usually end up with when Xvnc tries to
    // get everything in the correct state. Cheat and temporarily release
    // Ctrl and Alt when we send some other symbol.
    if (downKeySym.containsValue(XK_Control_L) &&
        downKeySym.containsValue(XK_Alt_R)) {
      vlog.debug("Faking release of AltGr (Ctrl_L+Alt_R)");
      try {
        cc.writer().writeKeyEvent(XK_Control_L, false);
        cc.writer().writeKeyEvent(XK_Alt_R, false);
      } catch (Exception e) {
        vlog.error("%s", e.getMessage());
        cc.close();
      }
    }
  }

  // Because of the way keyboards work, we cannot expect to have the same
  // symbol on release as when pressed. This breaks the VNC protocol however,
  // so we need to keep track of what keysym a key _code_ generated on press
  // and send the same on release.
  downKeySym.put(keyCode, keySym);

  vlog.debug("Key pressed: 0x%016x => 0x%04x", keyCode, keySym);

  try {
    // Fake keycode?
    // NOTE(review): both branches of this conditional send the identical
    // event, so the fake-keycode distinction currently has no effect —
    // confirm whether an extended key message was intended here.
    if (keyCode > 0xffffffffL)
      cc.writer().writeKeyEvent(keySym, true);
    else
      cc.writer().writeKeyEvent(keySym, true);
  } catch (Exception e) {
    vlog.error("%s", e.getMessage());
    cc.close();
  }

  if (VncViewer.os.startsWith("windows")) {
    // Ugly hack continued...
    if (downKeySym.containsValue(XK_Control_L) &&
        downKeySym.containsValue(XK_Alt_R)) {
      vlog.debug("Restoring AltGr state");
      try {
        cc.writer().writeKeyEvent(XK_Control_L, true);
        cc.writer().writeKeyEvent(XK_Alt_R, true);
      } catch (Exception e) {
        vlog.error("%s", e.getMessage());
        cc.close();
      }
    }
  }
}
// Sends a key release to the server, using the keysym that was recorded for
// this keyCode when it was pressed. Releases with no matching press are
// logged at debug level and dropped.
public void handleKeyRelease(long keyCode)
{
  Integer iter;

  if (viewOnly.getValue())
    return;

  iter = downKeySym.get(keyCode);
  if (iter == null) {
    // These occur somewhat frequently so let's not spam them unless
    // logging is turned up.
    vlog.debug("Unexpected release of key code %d", keyCode);
    return;
  }

  vlog.debug("Key released: 0x%016x => 0x%04x", keyCode, iter);

  try {
    // NOTE(review): both branches are identical (same as in handleKeyPress);
    // the fake-keycode distinction currently has no effect — confirm intent.
    if (keyCode > 0xffffffffL)
      cc.writer().writeKeyEvent(iter, false);
    else
      cc.writer().writeKeyEvent(iter, false);
  } catch (Exception e) {
    vlog.error("%s", e.getMessage());
    cc.close();
  }

  downKeySym.remove(keyCode);
}
// Converts a raw AWT key event into a (keyCode, keySym) pair and forwards it
// to handleKeyPress/handleKeyRelease. Returns 1 when consumed, 0 otherwise.
private int handleSystemEvent(AWTEvent event)
{
  if (event instanceof KeyEvent) {
    KeyEvent ev = (KeyEvent)event;
    if (KeyMap.get_keycode_fallback_extended(ev) == 0) {
      // Not much we can do with this...
      vlog.debug("Ignoring KeyEvent with unknown Java keycode");
      return 0;
    }

    if (ev.getID() == KeyEvent.KEY_PRESSED) {
      // Generate a fake keycode just for tracking if we can't figure
      // out the proper one. Java virtual key codes aren't unique
      // between left/right versions of keys, so we can't use them as
      // indexes to the downKeySym map.
      long keyCode = KeyMap.get_keycode_fallback_extended(ev) | ((long)ev.getKeyLocation()<<32);

      // Pressing Ctrl wreaks havoc with the symbol lookup, so turn
      // that off. But AltGr shows up as Ctrl_L+Alt_R in Windows, so
      // construct a new KeyEvent that uses a proper AltGraph for the
      // symbol lookup.
      int keySym;
      if (VncViewer.os.startsWith("windows") &&
          downKeySym.containsValue(XK_Control_L) &&
          downKeySym.containsValue(XK_Alt_R)) {
        int mask = ev.getModifiers();
        mask &= ~CTRL_MASK;
        mask &= ~ALT_MASK;
        mask |= ALT_GRAPH_MASK;
        AWTKeyStroke ks =
          AWTKeyStroke.getAWTKeyStroke(KeyMap.get_keycode_fallback_extended(ev), mask);
        // The mask manipulations above break key combinations involving AltGr
        // and a key with an accented letter on some keyboard layouts (i.e. IT).
        // So the code should first try the modified event, but if it returns no
        // symbol, the original event should be used.
        final KeyEvent winev = new KeyEvent((JComponent)ev.getSource(), ev.getID(),
          ev.getWhen(), mask, KeyMap.get_keycode_fallback_extended(ev),
          ks.getKeyChar(), ev.getKeyLocation());
        keySym = KeyMap.vkey_to_keysym(winev);
        if (keySym == KeyMap.NoSymbol)
          keySym = KeyMap.vkey_to_keysym(ev);
        else
          ev = winev;
      } else {
        keySym = KeyMap.vkey_to_keysym(ev);
      }

      if (keySym == KeyMap.NoSymbol)
        vlog.error("No symbol for virtual key 0x%016x", keyCode);

      if (VncViewer.os.startsWith("linux")) {
        switch (keySym) {
        // For the first few years, there wasn't a good consensus on what the
        // Windows keys should be mapped to for X11. So we need to help out a
        // bit and map all variants to the same key...
        case XK_Hyper_L:
          keySym = XK_Super_L;
          break;
        case XK_Hyper_R:
          keySym = XK_Super_R;
          break;
        // There has been several variants for Shift-Tab over the years.
        // RFB states that we should always send a normal tab.
        case XK_ISO_Left_Tab:
          keySym = XK_Tab;
          break;
        }
      }

      handleKeyPress(keyCode, keySym);

      if (VncViewer.os.startsWith("mac os x")) {
        // We don't get any release events for CapsLock, so we have to
        // send the release right away.
        if (keySym == XK_Caps_Lock)
          handleKeyRelease(keyCode);
      }

      return 1;
    } else if (ev.getID() == KeyEvent.KEY_RELEASED) {
      long keyCode = KeyMap.get_keycode_fallback_extended(ev) | ((long)ev.getKeyLocation()<<32);
      handleKeyRelease(keyCode);
      return 1;
    }
  }
  return 0;
}
// (Re)builds the right-click context menu from scratch so that the toggle
// entries (full screen, Ctrl, Alt) and enabled states reflect the current
// session state each time the menu is shown.
private void initContextMenu()
{
  // Heavyweight popup: a lightweight one would be clipped/hidden by the
  // heavyweight rendering surface of the viewport.
  // NOTE(review): rationale inferred from typical Swing usage -- confirm.
  contextMenu.setLightWeightPopupEnabled(false);
  contextMenu.removeAll();
  menu_add(contextMenu, "Exit viewer", KeyEvent.VK_X,
           this, ID.EXIT, EnumSet.of(MENU.DIVIDER));
  // Checkbox reflects whether full-screen mode is currently active.
  menu_add(contextMenu, "Full screen", KeyEvent.VK_F, this, ID.FULLSCREEN,
           window().fullscreen_active() ?
           EnumSet.of(MENU.TOGGLE, MENU.VALUE) : EnumSet.of(MENU.TOGGLE));
  menu_add(contextMenu, "Minimize", KeyEvent.VK_Z,
           this, ID.MINIMIZE, EnumSet.noneOf(MENU.class));
  // Resizing makes no sense while full screen, so disable it there.
  menu_add(contextMenu, "Resize window to session", KeyEvent.VK_W,
           this, ID.RESIZE,
           window().fullscreen_active() ?
           EnumSet.of(MENU.INACTIVE, MENU.DIVIDER) : EnumSet.of(MENU.DIVIDER));
  menu_add(contextMenu, "Clipboard viewer...", KeyEvent.VK_UNDEFINED,
           this, ID.CLIPBOARD, EnumSet.of(MENU.DIVIDER));
  // Sticky modifier toggles; checked state mirrors menuCtrlKey/menuAltKey.
  menu_add(contextMenu, "Ctrl", KeyEvent.VK_C,
           this, ID.CTRL,
           menuCtrlKey ? EnumSet.of(MENU.TOGGLE, MENU.VALUE) : EnumSet.of(MENU.TOGGLE));
  menu_add(contextMenu, "Alt", KeyEvent.VK_A,
           this, ID.ALT,
           menuAltKey ? EnumSet.of(MENU.TOGGLE, MENU.VALUE) : EnumSet.of(MENU.TOGGLE));
  // Only offer "Send <menu key>" when a menu key is actually configured.
  if (menuKeySym != 0) {
    String sendMenuKey = String.format("Send %s", menuKey.getValueStr());
    menu_add(contextMenu, sendMenuKey, menuKeyJava,
             this, ID.MENUKEY, EnumSet.noneOf(MENU.class));
  }
  menu_add(contextMenu, "Send Ctrl-Alt-Del", KeyEvent.VK_D,
           this, ID.CTRLALTDEL, EnumSet.of(MENU.DIVIDER));
  menu_add(contextMenu, "Refresh screen", KeyEvent.VK_R,
           this, ID.REFRESH, EnumSet.of(MENU.DIVIDER));
  menu_add(contextMenu, "New connection...", KeyEvent.VK_N,
           this, ID.NEWVIEWER, EnumSet.of(MENU.DIVIDER));
  menu_add(contextMenu, "Options...", KeyEvent.VK_O,
           this, ID.OPTIONS, EnumSet.noneOf(MENU.class));
  menu_add(contextMenu, "Connection info...", KeyEvent.VK_I,
           this, ID.INFO, EnumSet.noneOf(MENU.class));
  menu_add(contextMenu, "About TigerVNC viewer...", KeyEvent.VK_T,
           this, ID.ABOUT, EnumSet.of(MENU.DIVIDER));
  menu_add(contextMenu, "Dismiss menu", KeyEvent.VK_M,
           this, ID.DISMISS, EnumSet.noneOf(MENU.class));
}
// Appends one entry to the popup menu.
//
// text/shortcut define the visible item (a checkbox item when MENU.TOGGLE
// is set, pre-checked when MENU.VALUE is also set); cb receives the action
// with data's enum name as the action command; MENU.INACTIVE disables the
// item and MENU.DIVIDER appends a separator after it.
static void menu_add(JPopupMenu menu, String text,
                     int shortcut, ActionListener cb,
                     ID data, EnumSet<MENU> flags)
{
  final JMenuItem item;
  if (flags.contains(MENU.TOGGLE))
    item = new JCheckBoxMenuItem(text, flags.contains(MENU.VALUE));
  else
    // A shortcut of 0 (KeyEvent.VK_UNDEFINED) means "no mnemonic".
    item = (shortcut != 0) ? new JMenuItem(text, shortcut)
                           : new JMenuItem(text);
  item.setActionCommand(data.toString());
  item.addActionListener(cb);
  item.setEnabled(!flags.contains(MENU.INACTIVE));
  menu.add(item);
  if (flags.contains(MENU.DIVIDER))
    menu.addSeparator();
}
// Shows the context menu at the last known pointer position inside this
// viewport, rebuilding it first so its state is current.
void popupContextMenu()
{
  // initialize context menu before display
  initContextMenu();
  // Use the default arrow cursor over the menu rather than the remote cursor.
  contextMenu.setCursor(java.awt.Cursor.getDefaultCursor());
  contextMenu.show(this, lastPointerPos.x, lastPointerPos.y);
}
// Dispatches context-menu actions; the action command string encodes the
// ID enum constant set by menu_add().
public void actionPerformed(ActionEvent ev)
{
  switch(ID.valueOf(ev.getActionCommand())) {
  case EXIT:
    cc.close();
    break;
  case FULLSCREEN:
    // Toggle between windowed and full-screen mode.
    if (window().fullscreen_active())
      window().fullscreen_off();
    else
      window().fullscreen_on();
    break;
  case MINIMIZE:
    // Leave full-screen mode before iconifying.
    if (window().fullscreen_active())
      window().fullscreen_off();
    window().setExtendedState(JFrame.ICONIFIED);
    break;
  case RESIZE:
    if (window().fullscreen_active())
      break;
    // Add the frame insets so the *client area* matches the session size.
    int dx = window().getInsets().left + window().getInsets().right;
    int dy = window().getInsets().top + window().getInsets().bottom;
    window().setSize(getWidth()+dx, getHeight()+dy);
    break;
  case CLIPBOARD:
    ClipboardDialog.showDialog(window());
    break;
  case CTRL:
    // Sticky Ctrl: press on select, release on deselect (key code 0x1d).
    if (((JMenuItem)ev.getSource()).isSelected())
      handleKeyPress(0x1d, XK_Control_L);
    else
      handleKeyRelease(0x1d);
    menuCtrlKey = !menuCtrlKey;
    break;
  case ALT:
    // Sticky Alt: press on select, release on deselect (key code 0x38).
    if (((JMenuItem)ev.getSource()).isSelected())
      handleKeyPress(0x38, XK_Alt_L);
    else
      handleKeyRelease(0x38);
    menuAltKey = !menuAltKey;
    break;
  case MENUKEY:
    // menuRecursion guards against the menu key re-triggering this menu.
    menuRecursion = true;
    handleKeyPress(menuKeyCode, menuKeySym);
    menuRecursion = false;
    handleKeyRelease(menuKeyCode);
    break;
  case CTRLALTDEL:
    // Press Ctrl, Alt, Del in order and release in reverse order.
    handleKeyPress(0x1d, XK_Control_L);
    handleKeyPress(0x38, XK_Alt_L);
    handleKeyPress(0xd3, XK_Delete);
    handleKeyRelease(0xd3);
    handleKeyRelease(0x38);
    handleKeyRelease(0x1d);
    break;
  case REFRESH:
    cc.refreshFramebuffer();
    break;
  case NEWVIEWER:
    VncViewer.newViewer();
    break;
  case OPTIONS:
    OptionsDialog.showDialog(cc.desktop);
    break;
  case INFO:
    // Temporarily leave exclusive full-screen so the dialog is visible,
    // then restore it afterwards.
    Window fullScreenWindow =
      DesktopWindow.getFullScreenWindow();
    if (fullScreenWindow != null)
      DesktopWindow.setFullScreenWindow(null);
    JOptionPane op = new JOptionPane(cc.connectionInfo(),
                                     JOptionPane.PLAIN_MESSAGE,
                                     JOptionPane.DEFAULT_OPTION);
    JDialog dlg = op.createDialog(window(), "VNC connection info");
    dlg.setIconImage(VncViewer.frameIcon);
    dlg.setAlwaysOnTop(true);
    dlg.setVisible(true);
    if (fullScreenWindow != null)
      DesktopWindow.setFullScreenWindow(fullScreenWindow);
    break;
  case ABOUT:
    VncViewer.about_vncviewer(cc.desktop);
    break;
  case DISMISS:
    // The popup closes itself; nothing to do.
    break;
  }
}
// Caches the configured menu key in its Java key-code, key-code and keysym
// forms for use by the MENUKEY menu action.
private void setMenuKey()
{
  menuKeyJava = MenuKey.getMenuKeyJavaCode();
  menuKeyCode = MenuKey.getMenuKeyCode();
  menuKeySym = MenuKey.getMenuKeySym();
}
// Applies option changes; currently only refreshes the cached menu key.
public void handleOptions()
{
  setMenuKey();
  /*
  setScaledSize(cc.server.width(), cc.server.height());
  if (!oldSize.equals(new Dimension(scaledWidth, scaledHeight))) {
    // Re-layout the DesktopWindow when the scaled size changes.
    // Ideally we'd do this with a ComponentListener, but unfortunately
    // sometimes a spurious resize event is triggered on the viewport
    // when the DesktopWindow is manually resized via the drag handles.
    if (cc.desktop != null && cc.desktop.isVisible()) {
      JScrollPane scroll = (JScrollPane)((JViewport)getParent()).getParent();
      scroll.setViewportBorder(BorderFactory.createEmptyBorder(0,0,0,0));
      cc.desktop.pack();
    }
  */
  // NOTE(review): the block above is disabled scaling re-layout code --
  // confirm whether it can be deleted or is pending re-enablement.
}
// Releases every key we currently consider held down, mirroring each press
// recorded in downKeySym.
public void releaseDownKeys() {
  while (!downKeySym.isEmpty())
    handleKeyRelease(downKeySym.keySet().iterator().next());
}
// The top-level DesktopWindow that contains this viewport.
private DesktopWindow window() {
  return (DesktopWindow)getTopLevelAncestor();
}
// Shorthand accessors for this component's geometry.
private int x() { return getX(); }
private int y() { return getY(); }
private int w() { return getWidth(); }
private int h() { return getHeight(); }
// access to cc by different threads is specified in CConn
private CConn cc;
// access to the following must be synchronized:
private PlatformPixelBuffer frameBuffer;
// Last reported pointer position/button state; the position is also used
// to place the context menu (see popupContextMenu()).
Point lastPointerPos = new Point(0, 0);
int lastButtonMask = 0;
// Map of currently held keys: (keyCode | location<<32) -> keysym.
// Releases look up the same composite key so they mirror the exact press.
private class DownMap extends HashMap<Long, Integer> {
  public DownMap(int capacity) {
    super(capacity);
  }
}
DownMap downKeySym = new DownMap(256);
// Cached forms of the configured menu key (kept in sync by setMenuKey()).
int menuKeySym;
int menuKeyCode, menuKeyJava;
JPopupMenu contextMenu;
// Guards against the menu key handler re-opening the context menu.
boolean menuRecursion = false;
// Sticky modifier state toggled from the context menu.
boolean menuCtrlKey = false;
boolean menuAltKey = false;
static Toolkit tk = Toolkit.getDefaultToolkit();
public int scaledWidth = 0, scaledHeight = 0;
float scaleRatioX, scaleRatioY;
static BufferedImage cursor;
Point cursorHotspot = new Point();
}
| TigerVNC/tigervnc | java/com/tigervnc/vncviewer/Viewport.java |
1,451 | /* ____ ______________ ________________________ __________
* \ \/ / \ \/ / __/ / \ \/ / \
* \______/___/\___\______/___/_____/___/\___\______/___/\___\
*
* The MIT License (MIT)
*
* Copyright 2023 Vavr, https://vavr.io
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package io.vavr.collection;
import io.vavr.*;
import io.vavr.collection.CharSeqModule.Combinations;
import io.vavr.collection.JavaConverters.ListView;
import io.vavr.control.Option;
import java.io.Serializable;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.util.*;
import java.util.function.*;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collector;
import static io.vavr.collection.JavaConverters.ChangePolicy.IMMUTABLE;
import static io.vavr.collection.JavaConverters.ChangePolicy.MUTABLE;
/**
* The CharSeq (read: character sequence) collection essentially is a rich String wrapper having all operations
* we know from the functional Vavr collections.
* <p>
* <strong>Note:</strong> Because CharSeq represents a sequence of primitive characters (i.e. a String),
* it breaks the Liskov Substitution Principle in the way, that the CharSeq cannot contain {@code null} elements.
* In a future version of Java, CharSeq should extend IndexedSeq&lt;char&gt; instead.
*/
public final class CharSeq implements CharSequence, IndexedSeq<Character>, Serializable, Comparable<CharSeq> {
private static final long serialVersionUID = 1L;
// Shared singleton for the empty sequence; all empty results funnel here.
private static final CharSeq EMPTY = new CharSeq("");
// The wrapped immutable Java string that actually stores the characters.
private final String back;
// Private: instances are created only through the static factory methods.
private CharSeq(String javaString) {
    this.back = javaString;
}
/**
 * Returns the singleton empty {@code CharSeq}.
 *
 * @return the empty {@code CharSeq}
 */
public static CharSeq empty() {
    return EMPTY;
}
/**
 * Returns a {@link java.util.stream.Collector} which may be used in conjunction with
 * {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link CharSeq}.
 *
 * @return a {@code Collector} which collects all the input elements into a
 * {@link io.vavr.collection.CharSeq}, in encounter order
 */
public static Collector<Character, ArrayList<Character>, CharSeq> collector() {
    // Accumulate into a mutable ArrayList, then convert once at the end.
    return Collections.toListAndThen(CharSeq::ofAll);
}
/**
 * Creates a String of {@code CharSequence}.
 *
 * @param sequence {@code CharSequence} instance.
 * @return A new {@link io.vavr.collection.CharSeq}
 */
// DEV-NOTE: Needs to be 'of' instead of 'ofAll' because 'ofAll(CharSeq)' is ambiguous.
public static CharSeq of(CharSequence sequence) {
    Objects.requireNonNull(sequence, "sequence is null");
    // Reuse the instance when it already is a CharSeq (immutable, so safe).
    if (sequence instanceof CharSeq) {
        return (CharSeq) sequence;
    }
    return (sequence.length() == 0) ? empty() : new CharSeq(sequence.toString());
}
/**
 * Returns a singleton {@code CharSeq}, i.e. a {@code CharSeq} of one character.
 *
 * @param character A character.
 * @return A new {@code CharSeq} instance containing the given element
 */
public static CharSeq of(char character) {
    return new CharSeq(String.valueOf(character));
}
/**
 * Creates a String of the given characters.
 *
 * @param characters Zero or more characters.
 * @return A string containing the given characters in the same order.
 * @throws NullPointerException if {@code characters} is null
 */
public static CharSeq of(char... characters) {
    Objects.requireNonNull(characters, "characters is null");
    if (characters.length == 0) {
        return empty();
    }
    // new String(char[]) already takes a defensive copy of the array, so the
    // previous manual System.arraycopy into a temporary buffer was redundant.
    return new CharSeq(new String(characters));
}
/**
 * Creates a String of the given elements.
 * <p>
 * The resulting string has the same iteration order as the given iterable of elements
 * if the iteration order of the elements is stable.
 *
 * @param elements An Iterable of elements.
 * @return A string containing the given elements in the same order.
 * @throws NullPointerException if {@code elements} is null or {@code elements} contains null
 */
@SuppressWarnings("unchecked")
public static CharSeq ofAll(Iterable<? extends Character> elements) {
    Objects.requireNonNull(elements, "elements is null");
    if (Collections.isEmpty(elements)){
        return EMPTY;
    }
    // Fast path: the iterable already is a CharSeq...
    if (elements instanceof CharSeq) {
        return (CharSeq) elements;
    }
    // ...or is a java.util.List view whose delegate is a CharSeq.
    if (elements instanceof ListView
            && ((ListView<Character, ?>) elements).getDelegate() instanceof CharSeq) {
        return (CharSeq) ((ListView<Character, ?>) elements).getDelegate();
    }
    final StringBuilder sb = new StringBuilder();
    for (char character : elements) {
        sb.append(character); // unboxing throws NPE for null elements, as documented
    }
    return of(sb);
}
/**
 * Returns a CharSeq containing {@code n} values of a given Function {@code f}
 * over a range of integer values from 0 to {@code n - 1}.
 *
 * @param n The number of elements in the CharSeq
 * @param f The Function computing element values
 * @return A CharSeq consisting of elements {@code f(0),f(1), ..., f(n - 1)}
 * @throws NullPointerException if {@code f} is null
 */
public static CharSeq tabulate(int n, Function<? super Integer, ? extends Character> f) {
    Objects.requireNonNull(f, "f is null");
    final StringBuilder builder = new StringBuilder();
    for (int index = 0; index < n; index++) {
        // charValue() unboxes explicitly so a null result fails fast here.
        builder.append(f.apply(index).charValue());
    }
    return of(builder);
}
/**
 * Returns a CharSeq containing {@code n} values supplied by a given Supplier {@code s}.
 *
 * @param n The number of elements in the CharSeq
 * @param s The Supplier computing element values
 * @return A CharSeq of size {@code n}, where each element contains the result supplied by {@code s}.
 * @throws NullPointerException if {@code s} is null
 */
public static CharSeq fill(int n, Supplier<? extends Character> s) {
    return tabulate(n, ignored -> s.get());
}
/**
 * Creates a CharSeq starting from character {@code from}, extending to character {@code toExclusive - 1}.
 * <p>
 * Examples:
 * <pre>
 * <code>
 * CharSeq.range('a', 'c')  // = "ab"
 * CharSeq.range('c', 'a')  // = ""
 * </code>
 * </pre>
 *
 * @param from        the first character
 * @param toExclusive the successor of the last character
 * @return a range of characters as specified or the empty range if {@code from >= toExclusive}
 */
public static CharSeq range(char from, char toExclusive) {
    return new CharSeq(io.vavr.collection.Iterator.range(from, toExclusive).mkString());
}
/**
 * Creates a CharSeq starting from character {@code from}, extending to character
 * {@code toExclusive - 1}, with the given {@code step}.
 *
 * @param from        the first character
 * @param toExclusive the successor of the last character
 * @param step        the step
 * @return a range of characters as specified, or the empty range if the range is empty
 */
public static CharSeq rangeBy(char from, char toExclusive, int step) {
    return new CharSeq(io.vavr.collection.Iterator.rangeBy(from, toExclusive, step).mkString());
}
/**
 * Creates a CharSeq starting from character {@code from}, extending to character {@code toInclusive}.
 * <p>
 * Examples:
 * <pre>
 * <code>
 * CharSeq.rangeClosed('a', 'c')  // = "abc"
 * CharSeq.rangeClosed('c', 'a')  // = ""
 * </code>
 * </pre>
 *
 * @param from        the first character
 * @param toInclusive the last character
 * @return a range of characters as specified or the empty range if {@code from > toInclusive}
 */
public static CharSeq rangeClosed(char from, char toInclusive) {
    return new CharSeq(io.vavr.collection.Iterator.rangeClosed(from, toInclusive).mkString());
}
/**
 * Creates a CharSeq starting from character {@code from}, extending to character {@code toInclusive},
 * with {@code step}.
 * <p>
 * Examples:
 * <pre>
 * <code>
 * CharSeq.rangeClosedBy('a', 'c', 1)  // = ('a', 'b', 'c')
 * CharSeq.rangeClosedBy('a', 'd', 2)  // = ('a', 'c')
 * CharSeq.rangeClosedBy('d', 'a', -2) // = ('d', 'b')
 * CharSeq.rangeClosedBy('d', 'a', 2)  // = ()
 * </code>
 * </pre>
 *
 * @param from        the first character
 * @param toInclusive the last character
 * @param step        the step
 * @return a range of characters as specified or the empty range if {@code step * (from - toInclusive) > 0}.
 * @throws IllegalArgumentException if {@code step} is zero
 */
public static CharSeq rangeClosedBy(char from, char toInclusive, int step) {
    return new CharSeq(io.vavr.collection.Iterator.rangeClosedBy(from, toInclusive, step).mkString());
}
/**
 * Creates a CharSeq from a seed value and a function.
 * The function takes the seed at first.
 * The function should return {@code None} when it's
 * done generating the CharSeq, otherwise {@code Some} {@code Tuple}
 * of the element for the next call and the value to add to the
 * resulting CharSeq.
 * <p>
 * Example:
 * <pre>
 * <code>
 * CharSeq.unfoldRight('j', x -> x == 'a'
 *                                ? Option.none()
 *                                : Option.of(new Tuple2&lt;&gt;(x, (char) (x - 1))));
 * // CharSeq.of("jihgfedcb"))
 * </code>
 * </pre>
 *
 * @param <T>  type of seeds
 * @param seed the start value for the iteration
 * @param f    the function to get the next step of the iteration
 * @return a CharSeq with the values built up by the iteration
 * @throws NullPointerException if {@code f} is null
 */
public static <T> CharSeq unfoldRight(T seed, Function<? super T, Option<Tuple2<? extends Character, ? extends T>>> f) {
    return CharSeq.ofAll(io.vavr.collection.Iterator.unfoldRight(seed, f));
}
/**
 * Creates a CharSeq from a seed value and a function.
 * The function takes the seed at first.
 * The function should return {@code None} when it's
 * done generating the CharSeq, otherwise {@code Some} {@code Tuple}
 * of the value to add to the resulting CharSeq and
 * the element for the next call.
 * <p>
 * Example:
 * <pre>
 * <code>
 * CharSeq.unfoldLeft('j', x -> x == 'a'
 *                               ? Option.none()
 *                               : Option.of(new Tuple2&lt;&gt;((char) (x - 1), x)));
 * // CharSeq.of("bcdefghij"))
 * </code>
 * </pre>
 *
 * @param <T>  type of seeds
 * @param seed the start value for the iteration
 * @param f    the function to get the next step of the iteration
 * @return a CharSeq with the values built up by the iteration
 * @throws NullPointerException if {@code f} is null
 */
public static <T> CharSeq unfoldLeft(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends Character>>> f) {
    return CharSeq.ofAll(io.vavr.collection.Iterator.unfoldLeft(seed, f));
}
/**
 * Creates a CharSeq from a seed value and a function.
 * The function takes the seed at first.
 * The function should return {@code None} when it's
 * done generating the CharSeq, otherwise {@code Some} {@code Tuple}
 * of the value to add to the resulting CharSeq and
 * the element for the next call.
 * <p>
 * Example:
 * <pre>
 * <code>
 * CharSeq.unfold('j', x -> x == 'a'
 *                           ? Option.none()
 *                           : Option.of(new Tuple2&lt;&gt;((char) (x - 1), x)));
 * // CharSeq.of("bcdefghij"))
 * </code>
 * </pre>
 *
 * @param seed the start value for the iteration
 * @param f    the function to get the next step of the iteration
 * @return a CharSeq with the values built up by the iteration
 * @throws NullPointerException if {@code f} is null
 */
public static CharSeq unfold(Character seed, Function<? super Character, Option<Tuple2<? extends Character, ? extends Character>>> f) {
    return CharSeq.ofAll(io.vavr.collection.Iterator.unfold(seed, f));
}
// Splits this CharSeq into (prefix, remainder) where the prefix is the text
// collected in sb. Reuses this/EMPTY when one side covers the whole sequence.
private Tuple2<CharSeq, CharSeq> splitByBuilder(StringBuilder sb) {
    if (sb.length() == 0) {
        return Tuple.of(EMPTY, this);
    } else if (sb.length() == length()) {
        return Tuple.of(this, EMPTY);
    } else {
        return Tuple.of(of(sb), of(back.substring(sb.length())));
    }
}
/**
 * Repeats a character {@code times} times.
 *
 * @param character A character
 * @param times     Repetition count
 * @return A CharSeq representing {@code character * times}
 */
public static CharSeq repeat(char character, int times) {
    // Negative counts are treated as zero (empty result).
    final int length = Math.max(times, 0);
    final char[] characters = new char[length];
    java.util.Arrays.fill(characters, character);
    return new CharSeq(String.valueOf(characters));
}
/**
 * Repeats this CharSeq {@code times} times.
 * <p>
 * Example: {@code CharSeq.of("ja").repeat(13) = "jajajajajajajajajajajajaja"}
 *
 * @param times Repetition count
 * @return A CharSeq representing {@code this * times}
 */
public CharSeq repeat(int times) {
    if (times <= 0 || isEmpty()) {
        return empty();
    } else if (times == 1) {
        return this;
    } else {
        final int finalLength = length() * times;
        final char[] result = new char[finalLength];
        back.getChars(0, length(), result, 0);
        // Fill by repeated doubling: copy the already-filled prefix onto
        // itself while it still fits in the first half...
        int i = length();
        for (; i <= (finalLength >>> 1); i <<= 1) {
            System.arraycopy(result, 0, result, i, i);
        }
        // ...then one final partial copy completes the tail.
        System.arraycopy(result, 0, result, i, finalLength - i);
        return of(new String(result));
    }
}
//
//
// IndexedSeq
//
//
// Appends a single character; throws NPE on null via the unboxing below.
@Override
public CharSeq append(Character element) {
    // DEV-NOTE: we need to unbox, otherwise "null" will be appended to back
    final char c = element;
    return of(back + c);
}
// Appends all given characters; returns this unchanged when there are none.
@Override
public CharSeq appendAll(Iterable<? extends Character> elements) {
    Objects.requireNonNull(elements, "elements is null");
    if (Collections.isEmpty(elements)) {
        return this;
    }
    final StringBuilder sb = new StringBuilder(back);
    for (char element : elements) {
        sb.append(element);
    }
    return of(sb);
}
// Immutable java.util.List view of this CharSeq.
@Override
public java.util.List<Character> asJava() {
    return JavaConverters.asJava(this, IMMUTABLE);
}
// Runs action on an immutable java.util.List view, then returns this.
@Override
public CharSeq asJava(Consumer<? super java.util.List<Character>> action) {
    return Collections.asJava(this, action, IMMUTABLE);
}
// Mutable java.util.List view of this CharSeq.
@Override
public java.util.List<Character> asJavaMutable() {
    return JavaConverters.asJava(this, MUTABLE);
}
// Runs action on a mutable java.util.List view, then returns the result.
@Override
public CharSeq asJavaMutable(Consumer<? super java.util.List<Character>> action) {
    return Collections.asJava(this, action, MUTABLE);
}
// Maps through a partial function; result is a Vector since R need not be Character.
@Override
public <R> IndexedSeq<R> collect(PartialFunction<? super Character, ? extends R> partialFunction) {
    return Vector.ofAll(iterator().<R> collect(partialFunction));
}
// All combinations of all sizes 0..length(), concatenated in size order.
@Override
public IndexedSeq<CharSeq> combinations() {
    return Vector.rangeClosed(0, length()).map(this::combinations).flatMap(Function.identity());
}
// All k-element combinations; negative k is treated as 0.
@Override
public IndexedSeq<CharSeq> combinations(int k) {
    return Combinations.apply(this, Math.max(k, 0));
}
// Cartesian power of this sequence with itself, as an iterator.
@Override
public io.vavr.collection.Iterator<CharSeq> crossProduct(int power) {
    return io.vavr.collection.Collections.crossProduct(CharSeq.empty(), this, power);
}
// Removes duplicate characters, keeping first occurrences.
@Override
public CharSeq distinct() {
    return distinctBy(Function.identity());
}
// Distinct by comparator: a TreeSet remembers seen characters; filter keeps
// only the first character of each equivalence class.
@Override
public CharSeq distinctBy(Comparator<? super Character> comparator) {
    Objects.requireNonNull(comparator, "comparator is null");
    final java.util.Set<Character> seen = new java.util.TreeSet<>(comparator);
    return filter(seen::add);
}
// Distinct by extracted key: keeps the first character producing each key.
@Override
public <U> CharSeq distinctBy(Function<? super Character, ? extends U> keyExtractor) {
    Objects.requireNonNull(keyExtractor, "keyExtractor is null");
    final java.util.Set<U> seen = new java.util.HashSet<>();
    return filter(t -> seen.add(keyExtractor.apply(t)));
}
// Drops the first n characters; n <= 0 is a no-op, n >= length empties.
@Override
public CharSeq drop(int n) {
    if (n <= 0) {
        return this;
    } else if (n >= length()) {
        return EMPTY;
    } else {
        return of(back.substring(n));
    }
}
// Drops leading characters until the predicate first matches.
@Override
public CharSeq dropUntil(Predicate<? super Character> predicate) {
    return io.vavr.collection.Collections.dropUntil(this, predicate);
}
// Drops leading characters while the predicate matches (dual of dropUntil).
@Override
public CharSeq dropWhile(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    return dropUntil(predicate.negate());
}
// Drops the last n characters; n <= 0 is a no-op, n >= length empties.
@Override
public CharSeq dropRight(int n) {
    if (n <= 0) {
        return this;
    } else if (n >= length()) {
        return EMPTY;
    } else {
        return of(back.substring(0, length() - n));
    }
}
// Drops trailing characters while the predicate matches.
@Override
public CharSeq dropRightWhile(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    return dropRightUntil(predicate.negate());
}
// Drops trailing characters until the predicate first matches.
@Override
public CharSeq dropRightUntil(Predicate<? super Character> predicate) {
    return io.vavr.collection.Collections.dropRightUntil(this, predicate);
}
// Keeps only characters matching the predicate; reuses EMPTY/this when the
// result is empty or unchanged.
@Override
public CharSeq filter(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    final StringBuilder sb = new StringBuilder();
    for (int i = 0; i < back.length(); i++) {
        final char ch = get(i);
        if (predicate.test(ch)) {
            sb.append(ch);
        }
    }
    if (sb.length() == 0) {
        return EMPTY;
    } else if (sb.length() == length()) {
        return this;
    } else {
        return of(sb);
    }
}
// Keeps only characters NOT matching the predicate.
@Override
public CharSeq filterNot(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    return Collections.filterNot(this, predicate);
}
// Maps each character to an Iterable and flattens; returns a Vector since
// the elements need not be characters.
@Override
public <U> IndexedSeq<U> flatMap(Function<? super Character, ? extends Iterable<? extends U>> mapper) {
    Objects.requireNonNull(mapper, "mapper is null");
    if (isEmpty()) {
        return Vector.empty();
    } else {
        IndexedSeq<U> result = Vector.empty();
        for (int i = 0; i < length(); i++) {
            for (U u : mapper.apply(get(i))) {
                result = result.append(u);
            }
        }
        return result;
    }
}
// Char-specialized flatMap: maps each char to a CharSequence and concatenates,
// keeping the result a CharSeq.
public CharSeq flatMapChars(CharFunction<? extends CharSequence> mapper) {
    Objects.requireNonNull(mapper, "mapper is null");
    if (isEmpty()) {
        return this;
    } else {
        final StringBuilder builder = new StringBuilder();
        for (int i = 0; i < back.length(); i++) {
            builder.append(mapper.apply(back.charAt(i)));
        }
        return of(builder);
    }
}
// Groups consecutive equal characters into runs.
@Override
public Seq<CharSeq> group() {
    return Collections.group(this, CharSeq::empty);
}
// Groups characters by the classifier's result key.
@Override
public <C> Map<C, CharSeq> groupBy(Function<? super Character, ? extends C> classifier) {
    return io.vavr.collection.Collections.groupBy(this, classifier, CharSeq::ofAll);
}
// Non-overlapping chunks of the given size (a sliding window with step == size).
@Override
public io.vavr.collection.Iterator<CharSeq> grouped(int size) {
    return sliding(size, size);
}
// A CharSeq is always finite.
@Override
public boolean hasDefiniteSize() {
    return true;
}
// All characters except the last; throws on the empty sequence.
@Override
public CharSeq init() {
    if (isEmpty()) {
        throw new UnsupportedOperationException("init of empty string");
    } else {
        return of(back.substring(0, length() - 1));
    }
}
// Like init(), but None instead of throwing when empty.
@Override
public Option<CharSeq> initOption() {
    return isEmpty() ? Option.none() : Option.some(init());
}
// Inserts a single character at the given index (0..length() inclusive).
@Override
public CharSeq insert(int index, Character element) {
    if (index < 0) {
        throw new IndexOutOfBoundsException("insert(" + index + ", e)");
    }
    if (index > length()) {
        throw new IndexOutOfBoundsException("insert(" + index + ", e) on String of length " + length());
    }
    final char c = element; // unbox so a null element fails fast
    return of(new StringBuilder(back).insert(index, c).toString());
}
// Inserts all given characters at the given index (0..length() inclusive).
@Override
public CharSeq insertAll(int index, Iterable<? extends Character> elements) {
    Objects.requireNonNull(elements, "elements is null");
    if (index < 0) {
        throw new IndexOutOfBoundsException("insertAll(" + index + ", elements)");
    }
    if (index > length()) {
        throw new IndexOutOfBoundsException("insertAll(" + index + ", elements) on String of length " + length());
    }
    // prefix + inserted elements + suffix
    final StringBuilder sb = new StringBuilder(back.substring(0, index));
    for (char element : elements) {
        sb.append(element);
    }
    sb.append(back.substring(index));
    return of(sb);
}
// Iterator over the boxed characters of this sequence.
@Override
public io.vavr.collection.Iterator<Character> iterator() {
    return io.vavr.collection.Iterator.ofAll(toCharArray());
}
// Inserts the given character between every pair of adjacent characters.
@Override
public CharSeq intersperse(Character element) {
    final char c = element; // intentionally throw when element is null
    if (isEmpty()) {
        return EMPTY;
    } else {
        final StringBuilder sb = new StringBuilder().append(head());
        for (int i = 1; i < length(); i++) {
            sb.append(c).append(get(i));
        }
        return of(sb);
    }
}
// Maps each character through mapper; returns a Vector since the result
// elements need not be characters (see mapChars for a char-to-char map).
@Override
public <U> IndexedSeq<U> map(Function<? super Character, ? extends U> mapper) {
    Objects.requireNonNull(mapper, "mapper is null");
    IndexedSeq<U> result = Vector.empty();
    for (int i = 0; i < length(); i++) {
        result = result.append(mapper.apply(get(i)));
    }
    return result;
}
// The underlying string itself -- no separator needed for characters.
@Override
public String mkString() {
    return back;
}
// Right-pads with element up to the given length; no-op when already long enough.
@Override
public CharSeq padTo(int length, Character element) {
    final int actualLength = back.length();
    if (length <= actualLength) {
        return this;
    } else {
        return new CharSeq(back + padding(element, length - actualLength));
    }
}
// Left-pads with element up to the given length; no-op when already long enough.
@Override
public CharSeq leftPadTo(int length, Character element) {
    final int actualLength = back.length();
    if (length <= actualLength) {
        return this;
    } else {
        return of(padding(element, length - actualLength).append(back));
    }
}
// Falls back to the given characters when this sequence is empty.
@Override
public CharSeq orElse(Iterable<? extends Character> other) {
    return isEmpty() ? ofAll(other) : this;
}
// Lazy variant: the supplier is only invoked when this sequence is empty.
@Override
public CharSeq orElse(Supplier<? extends Iterable<? extends Character>> supplier) {
    return isEmpty() ? ofAll(supplier.get()) : this;
}
// Builds a run of `limit` copies of `element` for the padTo methods.
private static StringBuilder padding(char element, int limit) {
    final StringBuilder padding = new StringBuilder();
    for (int i = 0; i < limit; i++) {
        padding.append(element);
    }
    return padding;
}
// Replaces `replaced` characters starting at `from` with the given ones;
// out-of-range `from`/`replaced` are clamped rather than throwing.
@Override
public CharSeq patch(int from, Iterable<? extends Character> that, int replaced) {
    from = from < 0 ? 0 : from > length() ? length() : from;
    replaced = Math.max(replaced, 0);
    final StringBuilder sb = new StringBuilder(back.substring(0, from));
    for (char character : that) {
        sb.append(character);
    }
    // Skip the replaced span, then append whatever remains.
    from += replaced;
    if (from < length()) {
        sb.append(back.substring(from));
    }
    return sb.length() == 0 ? EMPTY : of(sb);
}
// Char-specialized map that keeps the result a CharSeq.
public CharSeq mapChars(CharUnaryOperator mapper) {
    Objects.requireNonNull(mapper, "mapper is null");
    if (isEmpty()) {
        return this;
    } else {
        final char[] chars = back.toCharArray();
        for (int i = 0; i < chars.length; i++) {
            chars[i] = mapper.apply(chars[i]);
        }
        return CharSeq.of(chars);
    }
}
// Splits into (matching, non-matching) preserving order; reuses EMPTY for
// an empty side.
@Override
public Tuple2<CharSeq, CharSeq> partition(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    if (isEmpty()) {
        return Tuple.of(EMPTY, EMPTY);
    }
    final StringBuilder left = new StringBuilder();
    final StringBuilder right = new StringBuilder();
    for (int i = 0; i < length(); i++) {
        final Character t = get(i);
        (predicate.test(t) ? left : right).append(t);
    }
    if (left.length() == 0) {
        return Tuple.of(EMPTY, of(right.toString()));
    } else if (right.length() == 0) {
        return Tuple.of(of(left.toString()), EMPTY);
    } else {
        return Tuple.of(of(left.toString()), of(right.toString()));
    }
}
// Applies action to the head element (if any), then returns this unchanged.
@Override
public CharSeq peek(Consumer<? super Character> action) {
    Objects.requireNonNull(action, "action is null");
    if (!isEmpty()) {
        action.accept(get(0));
    }
    return this;
}
// All distinct permutations, built recursively: for each distinct character,
// prepend it to every permutation of the remainder.
@Override
public IndexedSeq<CharSeq> permutations() {
    if (isEmpty()) {
        return Vector.empty();
    } else {
        if (length() == 1) {
            return Vector.of(this);
        } else {
            IndexedSeq<CharSeq> result = Vector.empty();
            // distinct() avoids emitting duplicate permutations for repeated chars.
            for (Character t : distinct()) {
                for (CharSeq ts : remove(t).permutations()) {
                    result = result.append(CharSeq.of(t).appendAll(ts));
                }
            }
            return result;
        }
    }
}
// Prepends a single character; throws NPE on null via the unboxing below.
@Override
public CharSeq prepend(Character element) {
    final char c = element;
    return of(c + back);
}
// Prepends all given characters; reuses this/ofAll when one side is empty.
@Override
public CharSeq prependAll(Iterable<? extends Character> elements) {
    Objects.requireNonNull(elements, "elements is null");
    if (Collections.isEmpty(elements)) {
        return this;
    } else if (isEmpty()) {
        return ofAll(elements);
    } else {
        final StringBuilder sb = new StringBuilder();
        for (char element : elements) {
            sb.append(element);
        }
        sb.append(back);
        return CharSeq.of(sb);
    }
}
// Removes the first occurrence of the given character (null removes nothing).
@Override
public CharSeq remove(Character element) {
    if (element == null) {
        return this;
    }
    final StringBuilder sb = new StringBuilder();
    boolean found = false;
    for (int i = 0; i < length(); i++) {
        final char c = get(i);
        if (!found && c == element) {
            found = true; // skip exactly this one occurrence
        } else {
            sb.append(c);
        }
    }
    return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb);
}
// Removes the first character matching the predicate; returns this when
// nothing matches.
@Override
public CharSeq removeFirst(Predicate<Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    final StringBuilder sb = new StringBuilder();
    boolean found = false;
    for (int i = 0; i < back.length(); i++) {
        final char ch = get(i);
        if (predicate.test(ch)) {
            if (found) {
                sb.append(ch); // only the FIRST match is dropped
            }
            found = true;
        } else {
            sb.append(ch);
        }
    }
    return found ? (sb.length() == 0 ? EMPTY : of(sb.toString())) : this;
}
// Removes the last character matching the predicate by scanning backwards.
@Override
public CharSeq removeLast(Predicate<Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    for (int i = length() - 1; i >= 0; i--) {
        if (predicate.test(get(i))) {
            return removeAt(i);
        }
    }
    return this;
}
// Removes the character at the given index; substring() enforces the bounds.
@Override
public CharSeq removeAt(int index) {
    final String removed = back.substring(0, index) + back.substring(index + 1);
    return removed.isEmpty() ? EMPTY : of(removed);
}
// Removes every occurrence of the given character (null removes nothing).
@Override
public CharSeq removeAll(Character element) {
    if (element == null) {
        return this;
    }
    return io.vavr.collection.Collections.removeAll(this, element);
}
// Removes every occurrence of each of the given characters.
@Override
public CharSeq removeAll(Iterable<? extends Character> elements) {
    return io.vavr.collection.Collections.removeAll(this, elements);
}
// Replaces the FIRST occurrence of currentElement with newElement
// (null current element matches nothing).
@Override
public CharSeq replace(Character currentElement, Character newElement) {
    if (currentElement == null) {
        return this;
    }
    final char currentChar = currentElement;
    final char newChar = newElement;
    final StringBuilder sb = new StringBuilder();
    boolean found = false;
    for (int i = 0; i < length(); i++) {
        final char c = get(i);
        if (!found && c == currentChar) {
            sb.append(newChar);
            found = true;
        } else {
            sb.append(c);
        }
    }
    return found ? of(sb) : this;
}
@Override
public CharSeq replaceAll(Character currentElement, Character newElement) {
if (currentElement == null) {
return this;
}
final char currentChar = currentElement;
final char newChar = newElement;
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
final char c = get(i);
if (c == currentChar) {
sb.append(newChar);
found = true;
} else {
sb.append(c);
}
}
return found ? of(sb) : this;
}
@Override
public CharSeq retainAll(Iterable<? extends Character> elements) {
    // Keeps only the characters contained in the given elements.
    return io.vavr.collection.Collections.retainAll(this, elements);
}
@Override
public CharSeq reverse() {
    // StringBuilder.reverse reverses the copied character sequence in place.
    final StringBuilder reversed = new StringBuilder(back).reverse();
    return of(reversed.toString());
}
@Override
public CharSeq rotateLeft(int n) {
    // Delegates circular left rotation by n positions to the shared helper.
    return Collections.rotateLeft(this, n);
}
@Override
public CharSeq rotateRight(int n) {
    // Delegates circular right rotation by n positions to the shared helper.
    return Collections.rotateRight(this, n);
}
@Override
public CharSeq scan(Character zero, BiFunction<? super Character, ? super Character, ? extends Character> operation) {
    // A scan over characters yields characters, so the result can stay a CharSeq.
    return io.vavr.collection.Collections.scanLeft(this, zero, operation, io.vavr.collection.Iterator::toCharSeq);
}
@Override
public <U> IndexedSeq<U> scanLeft(U zero, BiFunction<? super U, ? super Character, ? extends U> operation) {
    // The result type U is not necessarily a character, so a Vector is produced.
    return io.vavr.collection.Collections.scanLeft(this, zero, operation, io.vavr.collection.Iterator::toVector);
}
@Override
public <U> IndexedSeq<U> scanRight(U zero, BiFunction<? super Character, ? super U, ? extends U> operation) {
    // The result type U is not necessarily a character, so a Vector is produced.
    return io.vavr.collection.Collections.scanRight(this, zero, operation, io.vavr.collection.Iterator::toVector);
}
@Override
public CharSeq shuffle() {
    // Shuffles using the shared helper's default randomness source.
    return io.vavr.collection.Collections.shuffle(this, CharSeq::ofAll);
}
@Override
public CharSeq shuffle(Random random) {
    // Shuffles using the caller-supplied randomness source (reproducible with a seeded Random).
    return io.vavr.collection.Collections.shuffle(this, random, CharSeq::ofAll);
}
@Override
public CharSeq slice(int beginIndex, int endIndex) {
    // Clamp the requested range to the valid index range of this sequence.
    final int from = Math.max(beginIndex, 0);
    final int to = Math.min(endIndex, length());
    if (from >= to) {
        return EMPTY;
    }
    if (from <= 0 && to >= length()) {
        // the clamped range covers the whole sequence
        return this;
    }
    return CharSeq.of(back.substring(from, to));
}
@Override
public io.vavr.collection.Iterator<CharSeq> slideBy(Function<? super Character, ?> classifier) {
    // Groups consecutive characters with equal classifier results into sub-sequences.
    return iterator().slideBy(classifier).map(CharSeq::ofAll);
}
@Override
public io.vavr.collection.Iterator<CharSeq> sliding(int size) {
    // A sliding window with step 1.
    return sliding(size, 1);
}
@Override
public io.vavr.collection.Iterator<CharSeq> sliding(int size, int step) {
    // Windows of the given size, advancing by the given step.
    return iterator().sliding(size, step).map(CharSeq::ofAll);
}
@Override
public CharSeq sorted() {
    // Sorts the characters by their natural (char value) order.
    return isEmpty() ? this : toJavaStream().sorted().collect(CharSeq.collector());
}
@Override
public CharSeq sorted(Comparator<? super Character> comparator) {
    Objects.requireNonNull(comparator, "comparator is null");
    // Sorts the characters using the given comparator.
    return isEmpty() ? this : toJavaStream().sorted(comparator).collect(CharSeq.collector());
}
@Override
public <U extends Comparable<? super U>> CharSeq sortBy(Function<? super Character, ? extends U> mapper) {
    // Sort by the natural ordering of the mapped sort keys.
    return sortBy(Comparator.<U>naturalOrder(), mapper);
}
@Override
public <U> CharSeq sortBy(Comparator<? super U> comparator, Function<? super Character, ? extends U> mapper) {
    // Delegates to the shared sort-by-key implementation, rebuilding a CharSeq via the collector.
    return Collections.sortBy(this, comparator, mapper, collector());
}
@Override
public Tuple2<CharSeq, CharSeq> span(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    // Collect the longest prefix whose characters all satisfy the predicate;
    // the remainder forms the second component of the result.
    final StringBuilder prefix = new StringBuilder();
    int i = 0;
    while (i < length() && predicate.test(get(i))) {
        prefix.append(get(i));
        i++;
    }
    return splitByBuilder(prefix);
}
@Override
public CharSeq subSequence(int beginIndex) {
    // Bug fix: the original reported "begin index N < 0" even when the index
    // was too large; the two failure modes now get accurate messages.
    if (beginIndex < 0) {
        throw new IndexOutOfBoundsException("begin index " + beginIndex + " < 0");
    }
    if (beginIndex > length()) {
        throw new IndexOutOfBoundsException("begin index " + beginIndex + " > length " + length());
    }
    if (beginIndex == 0) {
        return this;
    } else if (beginIndex == length()) {
        return EMPTY;
    } else {
        return CharSeq.of(back.substring(beginIndex));
    }
}
@Override
public CharSeq tail() {
    // All characters except the first; undefined on the empty sequence.
    if (!isEmpty()) {
        return CharSeq.of(back.substring(1));
    }
    throw new UnsupportedOperationException("tail of empty string");
}
@Override
public Option<CharSeq> tailOption() {
    // Empty sequences have no tail.
    if (isEmpty()) {
        return Option.none();
    }
    return Option.some(tail());
}
@Override
public CharSeq take(int n) {
    // The first n characters; clamped to [0, length()].
    if (n <= 0) {
        return EMPTY;
    }
    return (n >= length()) ? this : CharSeq.of(back.substring(0, n));
}
@Override
public CharSeq takeUntil(Predicate<? super Character> predicate) {
    // The prefix up to (exclusive) the first character satisfying the predicate.
    return io.vavr.collection.Collections.takeUntil(this, predicate);
}
@Override
public CharSeq takeWhile(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    // takeWhile(p) is equivalent to takeUntil(not p).
    return takeUntil(predicate.negate());
}
@Override
public CharSeq takeRight(int n) {
    // The last n characters; clamped to [0, length()].
    if (n <= 0) {
        return EMPTY;
    }
    return (n >= length()) ? this : CharSeq.of(back.substring(length() - n));
}
@Override
public CharSeq takeRightUntil(Predicate<? super Character> predicate) {
    // The suffix up to (exclusive) the last character satisfying the predicate.
    return io.vavr.collection.Collections.takeRightUntil(this, predicate);
}
@Override
public CharSeq takeRightWhile(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    // takeRightWhile(p) is equivalent to takeRightUntil(not p).
    return takeRightUntil(predicate.negate());
}
/**
 * Transforms this {@code CharSeq} by applying the given function to the whole sequence.
 *
 * @param f A transformation
 * @param <U> Type of transformation result
 * @return An instance of type {@code U}
 * @throws NullPointerException if {@code f} is null
 */
public <U> U transform(Function<? super CharSeq, ? extends U> f) {
    Objects.requireNonNull(f, "f is null");
    // simply applies the given function to this instance
    return f.apply(this);
}
@Override
public CharSeq update(int index, Character element) {
    if ((index < 0) || (index >= length())) {
        throw new IndexOutOfBoundsException("update(" + index + ")");
    }
    // Replace the single character at the given position in a mutable copy.
    final StringBuilder updated = new StringBuilder(back);
    updated.setCharAt(index, element);
    return of(updated);
}
@Override
public CharSeq update(int index, Function<? super Character, ? extends Character> updater) {
    Objects.requireNonNull(updater, "updater is null");
    // Compute the replacement from the current character, then delegate.
    final char c = updater.apply(get(index));
    return update(index, c);
}
@Override
public <U> IndexedSeq<Tuple2<Character, U>> zip(Iterable<? extends U> that) {
    // zip is zipWith using tuple construction as the combiner.
    return zipWith(that, Tuple::of);
}
@Override
public <U, R> IndexedSeq<R> zipWith(Iterable<? extends U> that, BiFunction<? super Character, ? super U, ? extends R> mapper) {
    Objects.requireNonNull(that, "that is null");
    Objects.requireNonNull(mapper, "mapper is null");
    // Pairwise combination; stops as soon as either side is exhausted.
    final io.vavr.collection.Iterator<Character> chars = iterator();
    final java.util.Iterator<? extends U> others = that.iterator();
    IndexedSeq<R> zipped = Vector.empty();
    while (chars.hasNext() && others.hasNext()) {
        zipped = zipped.append(mapper.apply(chars.next(), others.next()));
    }
    return zipped;
}
@Override
public <U> IndexedSeq<Tuple2<Character, U>> zipAll(Iterable<? extends U> that, Character thisElem, U thatElem) {
    Objects.requireNonNull(that, "that is null");
    final io.vavr.collection.Iterator<Character> chars = iterator();
    final java.util.Iterator<? extends U> others = that.iterator();
    IndexedSeq<Tuple2<Character, U>> zipped = Vector.empty();
    // Continue until BOTH sides are exhausted, padding the shorter side
    // with thisElem / thatElem respectively.
    while (chars.hasNext() || others.hasNext()) {
        final Character left = chars.hasNext() ? chars.next() : thisElem;
        final U right = others.hasNext() ? others.next() : thatElem;
        zipped = zipped.append(Tuple.of(left, right));
    }
    return zipped;
}
@Override
public IndexedSeq<Tuple2<Character, Integer>> zipWithIndex() {
    // Pairs each character with its index using tuple construction as the combiner.
    return zipWithIndex(Tuple::of);
}
@Override
public <U> IndexedSeq<U> zipWithIndex(BiFunction<? super Character, ? super Integer, ? extends U> mapper) {
    Objects.requireNonNull(mapper, "mapper is null");
    // Combine each character with its position.
    IndexedSeq<U> zipped = Vector.empty();
    for (int index = 0; index < length(); index++) {
        zipped = zipped.append(mapper.apply(get(index), index));
    }
    return zipped;
}
@Override
public Character apply(Integer index) {
    // Treats this sequence as a function from index to the character at that index.
    return back.charAt(index);
}
@Override
public int indexOf(Character element, int from) {
    // The Character is auto-unboxed and widened to an int for String.indexOf(int, int).
    return back.indexOf(element, from);
}
@Override
public int lastIndexOf(Character element, int end) {
    // The Character is auto-unboxed and widened to an int for String.lastIndexOf(int, int).
    return back.lastIndexOf(element, end);
}
@Override
public Tuple2<CharSeq, CharSeq> splitAt(int n) {
    // Split into (first n characters, rest); n is clamped to [0, length()].
    if (n <= 0) {
        return Tuple.of(EMPTY, this);
    }
    if (n >= length()) {
        return Tuple.of(this, EMPTY);
    }
    return Tuple.of(of(back.substring(0, n)), of(back.substring(n)));
}
@Override
public Tuple2<CharSeq, CharSeq> splitAt(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    if (isEmpty()) {
        return Tuple.of(EMPTY, EMPTY);
    }
    // The left part is the longest prefix whose characters do NOT satisfy the predicate.
    final StringBuilder prefix = new StringBuilder();
    int i = 0;
    while (i < length() && !predicate.test(get(i))) {
        prefix.append(get(i));
        i++;
    }
    return splitByBuilder(prefix);
}
@Override
public Tuple2<CharSeq, CharSeq> splitAtInclusive(Predicate<? super Character> predicate) {
    Objects.requireNonNull(predicate, "predicate is null");
    if (isEmpty()) {
        return Tuple.of(EMPTY, EMPTY);
    }
    // Unlike splitAt, the first matching character still belongs to the left part.
    final StringBuilder prefix = new StringBuilder();
    for (int i = 0; i < length(); i++) {
        final char c = get(i);
        prefix.append(c);
        if (predicate.test(c)) {
            break;
        }
    }
    return splitByBuilder(prefix);
}
@Override
public boolean startsWith(Iterable<? extends Character> that, int offset) {
    // Materializes the iterable as a CharSeq and delegates to the String-backed check.
    return startsWith(CharSeq.ofAll(that), offset);
}
@Override
public Character head() {
    // The first character; undefined on the empty sequence.
    if (!isEmpty()) {
        return get(0);
    }
    throw new NoSuchElementException("head of empty string");
}
/**
 * A {@code CharSeq} is computed synchronously — accessing its elements never
 * waits on asynchronous work.
 *
 * @return false
 */
@Override
public boolean isAsync() {
    return false;
}
@Override
public boolean isEmpty() {
    // Empty iff the backing string contains no characters.
    return back.isEmpty();
}
/**
 * A {@code CharSeq} is computed eagerly — all characters are materialized
 * in the backing string at construction time.
 *
 * @return false
 */
@Override
public boolean isLazy() {
    return false;
}
@Override
public boolean isTraversableAgain() {
    // Backed by an immutable String, so it can be traversed any number of times.
    return true;
}
// Instance control for Java serialization: a deserialized empty instance is
// replaced by the canonical EMPTY singleton.
private Object readResolve() {
    return isEmpty() ? EMPTY : this;
}
@Override
public boolean equals(Object o) {
    // Delegates to the shared ordered, element-wise equality implementation.
    return io.vavr.collection.Collections.equals(this, o);
}
@Override
public int hashCode() {
    // Order-sensitive hash, consistent with equals above.
    return io.vavr.collection.Collections.hashOrdered(this);
}
//
//
// CharSequence
//
//
/**
 * Returns the {@code char} value at the specified index, analogous to
 * {@link String#charAt(int)}. Indices range from {@code 0} to {@code length() - 1}.
 * If the value at {@code index} is a surrogate, the surrogate code unit itself
 * is returned.
 *
 * @param index the index of the {@code char} value.
 * @return the {@code char} value at the specified index of this string.
 *         The first {@code char} value is at index {@code 0}.
 * @throws IndexOutOfBoundsException if the {@code index}
 *         argument is negative or not less than the length of this
 *         string.
 */
@Override
public char charAt(int index) {
    return get(index);
}
/**
 * Returns the length of this string, i.e. the number of Unicode
 * code units (not code points) it contains.
 *
 * @return the length of the sequence of characters represented by this
 *         object.
 */
@Override
public int length() {
    return back.length();
}
//
//
// String
//
//
/**
 * Returns the character (Unicode code point) at the specified index,
 * analogous to {@link String#codePointAt(int)}. If the {@code char} at
 * {@code index} is a high surrogate followed by a low surrogate, the
 * supplementary code point of the pair is returned; otherwise the
 * {@code char} value itself is returned.
 *
 * @param index the index to the {@code char} values
 * @return the code point value of the character at the {@code index}
 * @throws IndexOutOfBoundsException if the {@code index}
 *         argument is negative or not less than the length of this
 *         string.
 */
public int codePointAt(int index) {
    return back.codePointAt(index);
}
/**
 * Returns the character (Unicode code point) before the specified index,
 * analogous to {@link String#codePointBefore(int)}. Valid indices range from
 * {@code 1} to {@code length()}. If the chars at {@code index - 2} and
 * {@code index - 1} form a surrogate pair, its supplementary code point is
 * returned; an unpaired surrogate yields the surrogate value itself.
 *
 * @param index the index following the code point that should be returned
 * @return the Unicode code point value before the given index.
 * @throws IndexOutOfBoundsException if the {@code index}
 *         argument is less than 1 or greater than the length
 *         of this string.
 */
public int codePointBefore(int index) {
    return back.codePointBefore(index);
}
/**
 * Returns the number of Unicode code points in the text range
 * {@code [beginIndex, endIndex)} of this {@code CharSeq}, analogous to
 * {@link String#codePointCount(int, int)}. Unpaired surrogates count as one
 * code point each.
 *
 * @param beginIndex the index to the first {@code char} of the text range.
 * @param endIndex   the index after the last {@code char} of the text range.
 * @return the number of Unicode code points in the specified text range
 * @throws IndexOutOfBoundsException if the
 *         {@code beginIndex} is negative, or {@code endIndex}
 *         is larger than the length of this {@code CharSeq}, or
 *         {@code beginIndex} is larger than {@code endIndex}.
 */
public int codePointCount(int beginIndex, int endIndex) {
    return back.codePointCount(beginIndex, endIndex);
}
/**
 * Returns the index within this {@code CharSeq} that is offset from
 * {@code index} by {@code codePointOffset} code points, analogous to
 * {@link String#offsetByCodePoints(int, int)}. Unpaired surrogates within the
 * traversed range count as one code point each.
 *
 * @param index           the index to be offset
 * @param codePointOffset the offset in code points
 * @return the index within this {@code CharSeq}
 * @throws IndexOutOfBoundsException if {@code index} is negative or larger
 *         than the length of this {@code CharSeq}, or if there are fewer than
 *         the absolute value of {@code codePointOffset} code points in the
 *         corresponding direction.
 */
public int offsetByCodePoints(int index, int codePointOffset) {
    return back.offsetByCodePoints(index, codePointOffset);
}
/**
 * Copies the characters in the range {@code [srcBegin, srcEnd)} from this
 * sequence into the destination array starting at {@code dstBegin}, analogous
 * to {@link String#getChars(int, int, char[], int)}.
 *
 * @param srcBegin index of the first character in the string to copy.
 * @param srcEnd   index after the last character in the string to copy.
 * @param dst      the destination array.
 * @param dstBegin the start offset in the destination array.
 * @throws IndexOutOfBoundsException if {@code srcBegin} is negative,
 *         {@code srcBegin} is greater than {@code srcEnd}, {@code srcEnd} is
 *         greater than the length of this string, {@code dstBegin} is negative,
 *         or {@code dstBegin + (srcEnd - srcBegin)} is larger than {@code dst.length}
 */
public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin) {
    // (the parameter was previously declared with C-style array syntax "char dst[]")
    back.getChars(srcBegin, srcEnd, dst, dstBegin);
}
/**
 * Encodes this {@code CharSeq} into a sequence of bytes using the named
 * charset, storing the result into a new byte array. The behavior when this
 * string cannot be encoded in the given charset is unspecified; use
 * {@link java.nio.charset.CharsetEncoder} for more control.
 *
 * @param charsetName The name of a supported {@linkplain java.nio.charset.Charset
 *                    charset}
 * @return The resultant byte array
 * @throws UnsupportedEncodingException If the named charset is not supported
 */
public byte[] getBytes(String charsetName) throws UnsupportedEncodingException {
    return back.getBytes(charsetName);
}
/**
 * Encodes this {@code CharSeq} into a sequence of bytes using the given
 * {@linkplain java.nio.charset.Charset charset}, storing the result into a
 * new byte array. Malformed-input and unmappable-character sequences are
 * replaced with the charset's default replacement byte array; use
 * {@link java.nio.charset.CharsetEncoder} for more control.
 *
 * @param charset The {@linkplain java.nio.charset.Charset} to be used to encode
 *                the {@code CharSeq}
 * @return The resultant byte array
 */
public byte[] getBytes(Charset charset) {
    return back.getBytes(charset);
}
/**
 * Encodes this {@code CharSeq} into a sequence of bytes using the
 * platform's default charset, storing the result into a new byte array.
 * NOTE: the result therefore depends on the JVM's default charset; prefer
 * {@link #getBytes(Charset)} for a deterministic encoding.
 *
 * @return The resultant byte array
 */
public byte[] getBytes() {
    return back.getBytes();
}
/**
 * Compares this string to the specified {@code StringBuffer}. The result
 * is {@code true} if and only if this {@code CharSeq} represents the same
 * sequence of characters as the specified {@code StringBuffer}. This method
 * synchronizes on the {@code StringBuffer}.
 *
 * @param sb The {@code StringBuffer} to compare this {@code CharSeq} against
 * @return {@code true} if this {@code CharSeq} represents the same
 *         sequence of characters as the specified {@code StringBuffer},
 *         {@code false} otherwise
 */
public boolean contentEquals(StringBuffer sb) {
    return back.contentEquals(sb);
}
/**
 * Compares this string to the specified {@code CharSequence}. The
 * result is {@code true} if and only if this {@code CharSeq} represents the
 * same sequence of char values as the specified sequence. Note that if the
 * {@code CharSequence} is a {@code StringBuffer} then the method
 * synchronizes on it.
 *
 * @param cs The sequence to compare this {@code CharSeq} against
 * @return {@code true} if this {@code CharSeq} represents the same
 *         sequence of char values as the specified sequence, {@code
 *         false} otherwise
 */
public boolean contentEquals(CharSequence cs) {
    return back.contentEquals(cs);
}
/**
 * Compares this {@code CharSeq} to another {@code CharSeq}, ignoring case
 * considerations, analogous to {@link String#equalsIgnoreCase(String)}.
 * Two characters are considered equal ignoring case if they are identical,
 * or if applying {@link Character#toUpperCase(char)} or
 * {@link Character#toLowerCase(char)} to each yields the same result.
 *
 * @param anotherString The {@code CharSeq} to compare this {@code CharSeq} against
 * @return {@code true} if the argument is not {@code null} and it
 *         represents an equivalent {@code CharSeq} ignoring case; {@code
 *         false} otherwise
 * @see #equals(Object)
 */
public boolean equalsIgnoreCase(CharSeq anotherString) {
    return back.equalsIgnoreCase(anotherString.back);
}
/**
 * Compares two strings lexicographically, analogous to
 * {@link String#compareTo(String)}. The comparison is based on the Unicode
 * value of each character. If the strings differ at some valid index
 * <i>k</i>, the result is {@code this.charAt(k) - anotherString.charAt(k)}
 * for the smallest such <i>k</i>; otherwise it is
 * {@code this.length() - anotherString.length()}. {@code compareTo} returns
 * {@code 0} exactly when {@link #equals(Object)} would return {@code true}.
 *
 * @param anotherString the {@code CharSeq} to be compared.
 * @return the value {@code 0} if the argument string is equal to
 *         this string; a value less than {@code 0} if this string
 *         is lexicographically less than the string argument; and a
 *         value greater than {@code 0} if this string is
 *         lexicographically greater than the string argument.
 */
public int compareTo(CharSeq anotherString) {
    return back.compareTo(anotherString.back);
}
/**
 * Compares two strings lexicographically, ignoring case differences, analogous
 * to {@link String#compareToIgnoreCase(String)}. Case differences are
 * eliminated as if by calling
 * {@code Character.toLowerCase(Character.toUpperCase(character))} on each
 * character. Note that this method does <em>not</em> take locale into account
 * and may produce an unsatisfactory ordering for certain locales; the
 * java.text package provides <em>collators</em> for locale-sensitive ordering.
 *
 * @param str the {@code CharSeq} to be compared.
 * @return a negative integer, zero, or a positive integer as the
 *         specified String is greater than, equal to, or less
 *         than this String, ignoring case considerations.
 */
public int compareToIgnoreCase(CharSeq str) {
    return back.compareToIgnoreCase(str.back);
}
/**
 * Tests if two string regions are equal, analogous to
 * {@link String#regionMatches(int, String, int, int)}. The region of this
 * {@code CharSeq} starting at {@code toffset} is compared character by
 * character with the region of {@code other} starting at {@code ooffset},
 * both of length {@code len}. The result is {@code false} (never an
 * exception) if either offset is negative, either region extends past the
 * end of its string, or any compared characters differ.
 *
 * @param toffset the starting offset of the subregion in this string.
 * @param other   the string argument.
 * @param ooffset the starting offset of the subregion in the string
 *                argument.
 * @param len     the number of characters to compare.
 * @return {@code true} if the specified subregion of this string
 *         exactly matches the specified subregion of the string argument;
 *         {@code false} otherwise.
 */
public boolean regionMatches(int toffset, CharSeq other, int ooffset, int len) {
    return back.regionMatches(toffset, other.back, ooffset, len);
}
/**
 * Tests if two string regions are equal, optionally ignoring case, analogous
 * to {@link String#regionMatches(boolean, int, String, int, int)}. The region
 * of this {@code CharSeq} starting at {@code toffset} is compared with the
 * region of {@code other} starting at {@code ooffset}, both of length
 * {@code len}. When {@code ignoreCase} is {@code true}, two characters match
 * if they are equal after applying {@link Character#toLowerCase(char)} or
 * after applying {@link Character#toUpperCase(char)}. The result is
 * {@code false} (never an exception) if either offset is negative or either
 * region extends past the end of its string.
 *
 * @param ignoreCase if {@code true}, ignore case when comparing
 *                   characters.
 * @param toffset    the starting offset of the subregion in this
 *                   string.
 * @param other      the string argument.
 * @param ooffset    the starting offset of the subregion in the string
 *                   argument.
 * @param len        the number of characters to compare.
 * @return {@code true} if the specified subregion of this string
 *         matches the specified subregion of the string argument;
 *         {@code false} otherwise. Whether the matching is exact
 *         or case insensitive depends on the {@code ignoreCase}
 *         argument.
 */
public boolean regionMatches(boolean ignoreCase, int toffset, CharSeq other, int ooffset, int len) {
    return back.regionMatches(ignoreCase, toffset, other.back, ooffset, len);
}
/**
 * Returns the subsequence spanning {@code [beginIndex, endIndex)}, or
 * {@code this} if the range covers the whole sequence.
 *
 * @param beginIndex the begin index, inclusive
 * @param endIndex   the end index, exclusive
 * @return the specified subsequence
 * @throws IndexOutOfBoundsException if {@code beginIndex} is negative,
 *         {@code endIndex} is greater than {@code length()}, or
 *         {@code beginIndex} is greater than {@code endIndex}
 */
@Override
public CharSeq subSequence(int beginIndex, int endIndex) {
    if (beginIndex < 0) {
        throw new IndexOutOfBoundsException("begin index " + beginIndex + " < 0");
    }
    if (endIndex > length()) {
        throw new IndexOutOfBoundsException("endIndex " + endIndex + " > length " + length());
    }
    if (beginIndex > endIndex) {
        // Bug fix: the CharSequence contract requires an IndexOutOfBoundsException
        // when start > end; an IllegalArgumentException was thrown here before.
        throw new IndexOutOfBoundsException("beginIndex " + beginIndex + " > endIndex " + endIndex);
    }
    if (beginIndex == 0 && endIndex == length()) {
        return this;
    } else {
        return CharSeq.of(back.subSequence(beginIndex, endIndex));
    }
}
/**
 * Tests if the substring of this string beginning at the
 * specified index starts with the specified prefix, analogous to
 * {@link String#startsWith(String, int)}.
 *
 * @param prefix  the prefix.
 * @param toffset where to begin looking in this string.
 * @return {@code true} if the character sequence represented by the
 *         argument is a prefix of the substring of this object starting
 *         at index {@code toffset}; {@code false} otherwise.
 *         The result is {@code false} if {@code toffset} is
 *         negative or greater than the length of this
 *         {@code CharSeq} object.
 */
public boolean startsWith(CharSeq prefix, int toffset) {
    return back.startsWith(prefix.back, toffset);
}
/**
 * Tests if this string starts with the specified prefix, analogous to
 * {@link String#startsWith(String)}.
 *
 * @param prefix the prefix.
 * @return {@code true} if the character sequence represented by the
 *         argument is a prefix of the character sequence represented by
 *         this string; {@code false} otherwise. Note that {@code true} is
 *         returned if the argument is an empty string or is equal to this
 *         {@code CharSeq} object as determined by the
 *         {@link #equals(Object)} method.
 */
public boolean startsWith(CharSeq prefix) {
    return back.startsWith(prefix.back);
}
/**
 * Tests if this string ends with the specified suffix, analogous to
 * {@link String#endsWith(String)}.
 *
 * @param suffix the suffix.
 * @return {@code true} if the character sequence represented by the
 *         argument is a suffix of the character sequence represented by
 *         this object; {@code false} otherwise. Note that the
 *         result will be {@code true} if the argument is the
 *         empty string or is equal to this {@code CharSeq} object
 *         as determined by the {@link #equals(Object)} method.
 */
public boolean endsWith(CharSeq suffix) {
    return back.endsWith(suffix.back);
}
/**
 * Returns the index within this string of the first occurrence of
 * the specified character (Unicode code point), analogous to
 * {@link String#indexOf(int)}. Values of {@code ch} in the range 0 to 0xFFFF
 * are matched against {@code charAt}, other values against
 * {@code codePointAt}. Returns {@code -1} if the character does not occur.
 *
 * @param ch a character (Unicode code point).
 * @return the index of the first occurrence of the character in the
 *         character sequence represented by this object, or
 *         {@code -1} if the character does not occur.
 */
public int indexOf(int ch) {
    return back.indexOf(ch);
}
/**
 * Returns the index of the first occurrence of the given element as an {@code Option}.
 *
 * @param ch a character (Unicode code point).
 * @return {@code Some(index)} or {@code None} if not found.
 */
Option<Integer> indexOfOption(int ch) {
    // indexOption converts the -1 "not found" sentinel into None.
    return io.vavr.collection.Collections.indexOption(indexOf(ch));
}
/**
 * Returns the index within this string of the first occurrence of the
 * specified character (Unicode code point), starting the search at the
 * specified index, analogous to {@link String#indexOf(int, int)}.
 * There is no restriction on {@code fromIndex}: a negative value behaves
 * like zero, and a value greater than the length yields {@code -1}.
 * All indices are specified in {@code char} values (Unicode code units).
 *
 * @param ch        a character (Unicode code point).
 * @param fromIndex the index to start the search from.
 * @return the index of the first occurrence of the character in the
 *         character sequence represented by this object that is greater
 *         than or equal to {@code fromIndex}, or {@code -1}
 *         if the character does not occur.
 */
public int indexOf(int ch, int fromIndex) {
    return back.indexOf(ch, fromIndex);
}
/**
* Returns the index of the first occurrence of the given element as an {@code Option},
* starting the search at the specified index.
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from.
* @return {@code Some(index)} or {@code None} if not found.
*/
Option<Integer> indexOfOption(int ch, int fromIndex) {
return io.vavr.collection.Collections.indexOption(indexOf(ch, fromIndex));
}
/**
* Returns the index within this string of the last occurrence of
* the specified character. For values of {@code ch} in the
* range from 0 to 0xFFFF (inclusive), the index (in Unicode code
* units) returned is the largest value <i>k</i> such that:
* <blockquote><pre>
* this.charAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* largest value <i>k</i> such that:
* <blockquote><pre>
* this.codePointAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string, then {@code -1} is returned. The
* {@code CharSeq} is searched backwards starting at the last
* character.
*
* @param ch a character (Unicode code point).
* @return the index of the last occurrence of the character in the
* character sequence represented by this object, or
* {@code -1} if the character does not occur.
*/
public int lastIndexOf(int ch) {
return back.lastIndexOf(ch);
}
/**
* Returns the index of the last occurrence of the given element as an {@code Option}.
*
* @param ch a character (Unicode code point).
* @return {@code Some(index)} or {@code None} if not found.
*/
Option<Integer> lastIndexOfOption(int ch) {
return io.vavr.collection.Collections.indexOption(lastIndexOf(ch));
}
/**
* Returns the index within this string of the last occurrence of
* the specified character, searching backward starting at the
* specified index. For values of {@code ch} in the range
* from 0 to 0xFFFF (inclusive), the index returned is the largest
* value <i>k</i> such that:
* <blockquote><pre>
* (this.charAt(<i>k</i>) == ch) {@code &&} (<i>k</i> <= fromIndex)
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* largest value <i>k</i> such that:
* <blockquote><pre>
* (this.codePointAt(<i>k</i>) == ch) {@code &&} (<i>k</i> <= fromIndex)
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string at or before position {@code fromIndex}, then
* {@code -1} is returned.
* <p>
* All indices are specified in {@code char} values
* (Unicode code units).
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from. There is no
* restriction on the value of {@code fromIndex}. If it is
* greater than or equal to the length of this string, it has
* the same effect as if it were equal to one less than the
* length of this string: this entire string may be searched.
* If it is negative, it has the same effect as if it were -1:
* -1 is returned.
* @return the index of the last occurrence of the character in the
* character sequence represented by this object that is less
* than or equal to {@code fromIndex}, or {@code -1}
* if the character does not occur before that point.
*/
public int lastIndexOf(int ch, int fromIndex) {
return back.lastIndexOf(ch, fromIndex);
}
/**
* Returns the index of the last occurrence of the given element as an {@code Option},
* starting the search at the specified index.
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from.
* @return {@code Some(index)} or {@code None} if not found.
*/
public Option<Integer> lastIndexOfOption(int ch, int fromIndex) {
return io.vavr.collection.Collections.indexOption(lastIndexOf(ch, fromIndex));
}
/**
* Returns the index within this string of the first occurrence of the
* specified substring.
* <p>
* The returned index is the smallest value <i>k</i> for which:
* <blockquote><pre>
* this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @return the index of the first occurrence of the specified substring,
* or {@code -1} if there is no such occurrence.
*/
public int indexOf(CharSeq str) {
return back.indexOf(str.back);
}
/**
* Returns the index of the first occurrence of the given element as an {@code Option}.
*
* @param str the substring to search for.
* @return {@code Some(index)} or {@code None} if not found.
*/
public Option<Integer> indexOfOption(CharSeq str) {
return io.vavr.collection.Collections.indexOption(indexOf(str));
}
/**
* Returns the index within this string of the first occurrence of the
* specified substring, starting at the specified index.
* <p>
* The returned index is the smallest value <i>k</i> for which:
* <blockquote><pre>
* <i>k</i> >= fromIndex {@code &&} this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @param fromIndex the index from which to start the search.
* @return the index of the first occurrence of the specified substring,
* starting at the specified index,
* or {@code -1} if there is no such occurrence.
*/
public int indexOf(CharSeq str, int fromIndex) {
return back.indexOf(str.back, fromIndex);
}
/**
* Returns the index of the first occurrence of the given element as an {@code Option},
* starting the search at the specified index.
*
* @param str the substring to search for.
* @param fromIndex the index from which to start the search.
* @return {@code Some(index)} or {@code None} if not found.
*/
public Option<Integer> indexOfOption(CharSeq str, int fromIndex) {
return io.vavr.collection.Collections.indexOption(indexOf(str, fromIndex));
}
/**
* Returns the index within this string of the last occurrence of the
* specified substring. The last occurrence of the empty string ""
* is considered to occur at the index value {@code this.length()}.
* <p>
* The returned index is the largest value <i>k</i> for which:
* <blockquote><pre>
* this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @return the index of the last occurrence of the specified substring,
* or {@code -1} if there is no such occurrence.
*/
public int lastIndexOf(CharSeq str) {
return back.lastIndexOf(str.back);
}
/**
* Returns the index of the last occurrence of the given element as an {@code Option}.
*
* @param str the substring to search for.
* @return {@code Some(index)} or {@code None} if not found.
*/
public Option<Integer> lastIndexOfOption(CharSeq str) {
return io.vavr.collection.Collections.indexOption(lastIndexOf(str));
}
/**
* Returns the index within this string of the last occurrence of the
* specified substring, searching backward starting at the specified index.
* <p>
* The returned index is the largest value <i>k</i> for which:
* <blockquote><pre>
* <i>k</i> {@code <=} fromIndex {@code &&} this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @param fromIndex the index to start the search from.
* @return the index of the last occurrence of the specified substring,
* searching backward from the specified index,
* or {@code -1} if there is no such occurrence.
*/
public int lastIndexOf(CharSeq str, int fromIndex) {
return back.lastIndexOf(str.back, fromIndex);
}
/**
* Returns the index of the last occurrence of the given element as an {@code Option},
* starting the search at the specified index.
*
* @param str the substring to search for.
* @param fromIndex the index to start the search from.
* @return {@code Some(index)} or {@code None} if not found.
*/
public Option<Integer> lastIndexOfOption(CharSeq str, int fromIndex) {
return io.vavr.collection.Collections.indexOption(lastIndexOf(str, fromIndex));
}
/**
* Returns a string that is a substring of this string. The
* substring begins with the character at the specified index and
* extends to the end of this string. <p>
* Examples:
* <blockquote><pre>
* "unhappy".substring(2) returns "happy"
* "Harbison".substring(3) returns "bison"
* "emptiness".substring(9) returns "" (an empty string)
* </pre></blockquote>
*
* @param beginIndex the beginning index, inclusive.
* @return the specified substring.
* @throws IndexOutOfBoundsException if
* {@code beginIndex} is negative or larger than the
* length of this {@code CharSeq} object.
*/
public CharSeq substring(int beginIndex) {
return CharSeq.of(back.substring(beginIndex));
}
/**
* Returns a string that is a substring of this string. The
* substring begins at the specified {@code beginIndex} and
* extends to the character at index {@code endIndex - 1}.
* Thus the length of the substring is {@code endIndex-beginIndex}.
* <p>
* Examples:
* <blockquote><pre>
* "hamburger".substring(4, 8) returns "urge"
* "smiles".substring(1, 5) returns "mile"
* </pre></blockquote>
*
* @param beginIndex the beginning index, inclusive.
* @param endIndex the ending index, exclusive.
* @return the specified substring.
* @throws IndexOutOfBoundsException if the
* {@code beginIndex} is negative, or
* {@code endIndex} is larger than the length of
* this {@code CharSeq} object, or
* {@code beginIndex} is larger than
* {@code endIndex}.
*/
public CharSeq substring(int beginIndex, int endIndex) {
return CharSeq.of(back.substring(beginIndex, endIndex));
}
    // Name of this collection type; used e.g. when rendering textual representations.
    @Override
    public String stringPrefix() {
        return "CharSeq";
    }
/**
* Returns a string containing the characters in this sequence in the same
* order as this sequence. The length of the string will be the length of
* this sequence.
*
* @return a string consisting of exactly this sequence of characters
*/
@Override
public String toString() {
return back;
}
/**
* Concatenates the specified string to the end of this string.
* <p>
* If the length of the argument string is {@code 0}, then this
* {@code CharSeq} object is returned. Otherwise, a
* {@code CharSeq} object is returned that represents a character
* sequence that is the concatenation of the character sequence
* represented by this {@code CharSeq} object and the character
* sequence represented by the argument string.<p>
* Examples:
* <blockquote><pre>
* "cares".concat("s") returns "caress"
* "to".concat("get").concat("her") returns "together"
* </pre></blockquote>
*
* @param str the {@code CharSeq} that is concatenated to the end
* of this {@code CharSeq}.
* @return a string that represents the concatenation of this object's
* characters followed by the string argument's characters.
*/
public CharSeq concat(CharSeq str) {
return CharSeq.of(back.concat(str.back));
}
/**
* Tells whether or not this string matches the given <a
* href="https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html#sum">regular expression</a>.
* <p>
* An invocation of this method of the form
* <i>str</i>{@code .matches(}<i>regex</i>{@code )} yields exactly the
* same result as the expression
* <blockquote>
* {@link Pattern}.{@link Pattern#matches(String, CharSequence)
* matches(<i>regex</i>, <i>str</i>)}
* </blockquote>
*
* @param regex the regular expression to which this string is to be matched
* @return {@code true} if, and only if, this string matches the
* given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see Pattern
*/
public boolean matches(String regex) {
return back.matches(regex);
}
/**
* Returns true if and only if this string contains the specified
* sequence of char values.
*
* @param s the sequence to search for
* @return true if this string contains {@code s}, false otherwise
*/
public boolean contains(CharSequence s) {
return back.contains(s);
}
/**
* Replaces the first substring of this string that matches the given <a
* href="https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html#sum">regular expression</a> with the
* given replacement.
* <p>
* An invocation of this method of the form
* <i>str</i>{@code .replaceFirst(}<i>regex</i>{@code ,} <i>repl</i>{@code )}
* yields exactly the same result as the expression
* <blockquote>
* <code>
* {@link Pattern}.{@link
* Pattern#compile compile}(<i>regex</i>).{@link
* Pattern#matcher(CharSequence) matcher}(<i>str</i>).{@link
* java.util.regex.Matcher#replaceFirst replaceFirst}(<i>repl</i>)
* </code>
* </blockquote>
* Note that backslashes ({@code \}) and dollar signs ({@code $}) in the
* replacement string may cause the results to be different than if it were
* being treated as a literal replacement string; see
* {@link java.util.regex.Matcher#replaceFirst}.
* Use {@link java.util.regex.Matcher#quoteReplacement} to suppress the special
* meaning of these characters, if desired.
*
* @param regex the regular expression to which this string is to be matched
* @param replacement the string to be substituted for the first match
* @return The resulting {@code CharSeq}
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see Pattern
*/
public CharSeq replaceFirst(String regex, String replacement) {
return CharSeq.of(back.replaceFirst(regex, replacement));
}
/**
* Replaces each substring of this string that matches the given <a
* href="https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html#sum">regular expression</a> with the
* given replacement.
* <p>
* An invocation of this method of the form
* <i>str</i>{@code .replaceAll(}<i>regex</i>{@code ,} <i>repl</i>{@code )}
* yields exactly the same result as the expression
* <blockquote>
* <code>
* {@link Pattern}.{@link
* Pattern#compile compile}(<i>regex</i>).{@link
* Pattern#matcher(CharSequence) matcher}(<i>str</i>).{@link
* java.util.regex.Matcher#replaceAll replaceAll}(<i>repl</i>)
* </code>
* </blockquote>
* Note that backslashes ({@code \}) and dollar signs ({@code $}) in the
* replacement string may cause the results to be different than if it were
* being treated as a literal replacement string; see
* {@link java.util.regex.Matcher#replaceAll Matcher.replaceAll}.
* Use {@link java.util.regex.Matcher#quoteReplacement} to suppress the special
* meaning of these characters, if desired.
*
* @param regex the regular expression to which this string is to be matched
* @param replacement the string to be substituted for each match
* @return The resulting {@code CharSeq}
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see Pattern
*/
public CharSeq replaceAll(String regex, String replacement) {
return CharSeq.of(back.replaceAll(regex, replacement));
}
/**
* Replaces each substring of this string that matches the literal target
* sequence with the specified literal replacement sequence. The
* replacement proceeds from the beginning of the string to the end, for
* example, replacing "aa" with "b" in the string "aaa" will result in
* "ba" rather than "ab".
*
* @param target The sequence of char values to be replaced
* @param replacement The replacement sequence of char values
* @return The resulting string
*/
public CharSeq replace(CharSequence target, CharSequence replacement) {
return CharSeq.of(back.replace(target, replacement));
}
/**
* Splits this string around matches of the given
* <a href="https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html#sum">regular expression</a>.
* <p>
* This method works as if by invoking the two-argument {@link #split(String, int)}
* method with the given expression and a limit argument of zero.
* Trailing empty strings are therefore not included in the resulting {@link Seq}.
* <p>
* The string {@code "boo:and:foo"}, for example, yields the following results with these expressions:
* <blockquote>
* <table style="border-collapse: collapse; border-spacing: 0">
* <caption>Split examples showing regex and result</caption>
* <tr>
* <th>Regex</th>
* <th>Result</th>
* </tr>
* <tr>
* <td>:</td>
* <td>{@code { "boo", "and", "foo" }}</td>
* </tr>
* <tr>
* <td>o</td>
* <td>{@code { "b", "", ":and:f" }}</td>
* </tr>
* </table>
* </blockquote>
*
* @param regex the delimiting regular expression
* @return the Seq of strings computed by splitting this string around matches of the given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see Pattern
*/
public Seq<CharSeq> split(String regex) {
return split(regex, 0);
}
/**
* Splits this string around matches of the given
* <a href="https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html#sum">regular expression</a>.
* <p>
* The array returned by this method contains each substring of this
* string that is terminated by another substring that matches the given
* expression or is terminated by the end of the string. The substrings in
* the array are in the order in which they occur in this string. If the
* expression does not match any part of the input then the resulting array
* has just one element, namely this string.
* <p>
* When there is a positive-width match at the beginning of this
* string then an empty leading substring is included at the beginning
* of the resulting array. A zero-width match at the beginning however
* never produces such empty leading substring.
* <p>
* The {@code limit} parameter controls the number of times the
* pattern is applied and therefore affects the length of the resulting
* array. If the limit <i>n</i> is greater than zero then the pattern
* will be applied at most <i>n</i> - 1 times, the array's
* length will be no greater than <i>n</i>, and the array's last entry
* will contain all input beyond the last matched delimiter. If <i>n</i>
* is non-positive then the pattern will be applied as many times as
* possible and the array can have any length. If <i>n</i> is zero then
* the pattern will be applied as many times as possible, the array can
* have any length, and trailing empty strings will be discarded.
* <p>
* The string {@code "boo:and:foo"}, for example, yields the
* following results with these parameters:
* <blockquote>
* <table style="border-collapse: collapse; border-spacing: 0">
* <caption>Split example showing regex, limit, and result</caption>
* <tr>
* <th>Regex</th>
* <th>Limit</th>
* <th>Result</th>
* </tr>
* <tr><td>:</td>
* <td>2</td>
* <td>{@code { "boo", "and:foo" }}</td></tr>
* <tr><td>:</td>
* <td>5</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td>:</td>
* <td>-2</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td>o</td>
* <td>5</td>
* <td>{@code { "b", "", ":and:f", "", "" }}</td></tr>
* <tr><td>o</td>
* <td>-2</td>
* <td>{@code { "b", "", ":and:f", "", "" }}</td></tr>
* <tr><td>o</td>
* <td>0</td>
* <td>{@code { "b", "", ":and:f" }}</td></tr>
* </table></blockquote>
* An invocation of this method of the form
* <i>str.</i>{@code split(}<i>regex</i>{@code ,} <i>n</i>{@code )}
* yields the same result as the expression
* <blockquote>
* <code>
* {@link Pattern}.{@link
* Pattern#compile compile}(<i>regex</i>).{@link
* Pattern#split(CharSequence, int) split}(<i>str</i>, <i>n</i>)
* </code>
* </blockquote>
*
* @param regex the delimiting regular expression
* @param limit the result threshold, as described above
* @return the Seq of strings computed by splitting this string around matches of the given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see Pattern
*/
public Seq<CharSeq> split(String regex, int limit) {
final Seq<String> split = Array.wrap(back.split(regex, limit));
return split.map(CharSeq::of);
}
/**
* Converts all of the characters in this {@code CharSeq} to lower
* case using the rules of the given {@code Locale}. Case mapping is based
* on the Unicode Standard version specified by the {@link Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of lowercase mappings are in the following table:
* <table style="border-collapse: collapse; border-spacing: 0">
* <caption>Lowercase mapping examples showing language code of locale, upper case, lower case, and description</caption>
* <tr>
* <th>Language Code of Locale</th>
* <th>Upper Case</th>
* <th>Lower Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0130</td>
* <td>\u0069</td>
* <td>capital letter I with dot above -> small letter i</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0049</td>
* <td>\u0131</td>
* <td>capital letter I -> small letter dotless i </td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>French Fries</td>
* <td>french fries</td>
* <td>lowercased all chars in String</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td><img src="doc-files/capiota.gif" alt="capiota"><img src="doc-files/capchi.gif" alt="capchi">
* <img src="doc-files/captheta.gif" alt="captheta"><img src="doc-files/capupsil.gif" alt="capupsil">
* <img src="doc-files/capsigma.gif" alt="capsigma"></td>
* <td><img src="doc-files/iota.gif" alt="iota"><img src="doc-files/chi.gif" alt="chi">
* <img src="doc-files/theta.gif" alt="theta"><img src="doc-files/upsilon.gif" alt="upsilon">
* <img src="doc-files/sigma1.gif" alt="sigma"></td>
* <td>lowercased all chars in String</td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, converted to lowercase.
* @see String#toLowerCase()
* @see String#toUpperCase()
* @see String#toUpperCase(Locale)
*/
public CharSeq toLowerCase(Locale locale) {
return CharSeq.of(back.toLowerCase(locale));
}
/**
* Converts all of the characters in this {@code CharSeq} to lower
* case using the rules of the default locale. This is equivalent to calling
* {@code toLowerCase(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "TITLE".toLowerCase()} in a Turkish locale
* returns {@code "t\u005Cu0131tle"}, where '\u005Cu0131' is the
* LATIN SMALL LETTER DOTLESS I character.
* To obtain correct results for locale insensitive strings, use
* {@code toLowerCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, converted to lowercase.
* @see String#toLowerCase(Locale)
*/
public CharSeq toLowerCase() {
return CharSeq.of(back.toLowerCase(Locale.getDefault()));
}
/**
* Converts all of the characters in this {@code CharSeq} to upper
* case using the rules of the given {@code Locale}. Case mapping is based
* on the Unicode Standard version specified by the {@link Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of locale-sensitive and 1:M case mappings are in the following table.
*
* <table style="border-collapse: collapse; border-spacing: 0">
* <caption>Examples of locale-sensitive and 1:M case mappings. Shows Language code of locale, lower case, upper case, and description.</caption>
* <tr>
* <th>Language Code of Locale</th>
* <th>Lower Case</th>
* <th>Upper Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0069</td>
* <td>\u0130</td>
* <td>small letter i -> capital letter I with dot above</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0131</td>
* <td>\u0049</td>
* <td>small letter dotless i -> capital letter I</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>\u00df</td>
* <td>\u0053 \u0053</td>
* <td>small letter sharp s -> two letters: SS</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>Fahrvergnügen</td>
* <td>FAHRVERGNÜGEN</td>
* <td></td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, converted to uppercase.
* @see String#toUpperCase()
* @see String#toLowerCase()
* @see String#toLowerCase(Locale)
*/
public CharSeq toUpperCase(Locale locale) {
return CharSeq.of(back.toUpperCase(locale));
}
/**
* Converts all of the characters in this {@code CharSeq} to upper
* case using the rules of the default locale. This method is equivalent to
* {@code toUpperCase(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "title".toUpperCase()} in a Turkish locale
* returns {@code "T\u005Cu0130TLE"}, where '\u005Cu0130' is the
* LATIN CAPITAL LETTER I WITH DOT ABOVE character.
* To obtain correct results for locale insensitive strings, use
* {@code toUpperCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, converted to uppercase.
* @see String#toUpperCase(Locale)
*/
public CharSeq toUpperCase() {
return CharSeq.of(back.toUpperCase(Locale.getDefault()));
}
/**
* Converts the first character in this {@code CharSeq} to upper
* case using the rules of the given {@code Locale}. If the {@code CharSeq} is
* empty, it won't have any effect. Case mapping is based
* on the Unicode Standard version specified by the {@link Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of locale-sensitive and 1:M case mappings are in the following table.
*
* <table style="border-collapse: collapse; border-spacing: 0">
* <caption>Examples of locale-sensitive and 1:M case mappings. Shows Language code of locale, lower case, upper case, and description.</caption>
* <tr>
* <th>Language Code of Locale</th>
* <th>Lower Case</th>
* <th>Upper Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0069</td>
* <td>\u0130</td>
* <td>small letter i -> capital letter I with dot above</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0131</td>
* <td>\u0049</td>
* <td>small letter dotless i -> capital letter I</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>\u00df</td>
* <td>\u0053 \u0053</td>
* <td>small letter sharp s -> two letters: SS</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>Fahrvergnügen</td>
* <td>FAHRVERGNÜGEN</td>
* <td></td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, capitalized.
*/
public CharSeq capitalize(Locale locale) {
if (back.isEmpty()) {
return this;
}
return CharSeq.of(back.substring(0, 1).toUpperCase(locale) + back.substring(1));
}
/**
* Converts the first character in this {@code CharSeq} to upper
* case using the rules of the default locale. If the {@code CharSeq} is
* empty, it won't have any effect. This method is equivalent to
* {@code capitalize(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "title".toUpperCase()} in a Turkish locale
* returns {@code "T\u005Cu0130TLE"}, where '\u005Cu0130' is the
* LATIN CAPITAL LETTER I WITH DOT ABOVE character.
* To obtain correct results for locale insensitive strings, use
* {@code toUpperCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, capitalized.
*/
public CharSeq capitalize() {
return capitalize(Locale.getDefault());
}
/**
* Returns a string whose value is this string, with any leading and trailing
* whitespace removed.
* <p>
* If this {@code CharSeq} object represents an empty character
* sequence, or the first and last characters of character sequence
* represented by this {@code CharSeq} object both have codes
* greater than {@code '\u005Cu0020'} (the space character), then a
* reference to this {@code CharSeq} object is returned.
* <p>
* Otherwise, if there is no character with a code greater than
* {@code '\u005Cu0020'} in the string, then a
* {@code CharSeq} object representing an empty string is
* returned.
* <p>
* Otherwise, let <i>k</i> be the index of the first character in the
* string whose code is greater than {@code '\u005Cu0020'}, and let
* <i>m</i> be the index of the last character in the string whose code
* is greater than {@code '\u005Cu0020'}. A {@code CharSeq}
* object is returned, representing the substring of this string that
* begins with the character at index <i>k</i> and ends with the
* character at index <i>m</i>-that is, the result of
* {@code this.substring(k, m + 1)}.
* <p>
* This method may be used to trim whitespace (as defined above) from
* the beginning and end of a string.
*
* @return A string whose value is this string, with any leading and trailing white
* space removed, or this string if it has no leading or
* trailing white space.
*/
public CharSeq trim() {
return of(back.trim());
}
/**
* Converts this string to a new character array.
*
* @return a newly allocated character array whose length is the length
* of this string and whose contents are initialized to contain
* the character sequence represented by this string.
*/
public char[] toCharArray() {
return back.toCharArray();
}
// -- number conversion
/**
* Decodes this {@code CharSeq} into a {@code Byte} by calling {@link Byte#decode(String)}.
* <p>
* We write
*
* <pre><code>
* Byte value = charSeq.decodeByte();
* </code></pre>
*
* instead of
*
* <pre><code>
* Byte value = Byte.decode(charSeq.mkString());
* </code></pre>
*
* @return a {@code Byte} object holding the byte value represented by this {@code CharSeq}
* @throws NumberFormatException if this {@code CharSeq} does not contain a parsable byte.
*/
public Byte decodeByte() {
return Byte.decode(back);
}
/**
* Decodes this {@code CharSeq} into an {@code Integer} by calling {@link Integer#decode(String)}.
* <p>
* We write
*
* <pre><code>
* Integer value = charSeq.decodeInteger();
* </code></pre>
*
* instead of
*
* <pre><code>
* Integer value = Integer.decode(charSeq.mkString());
* </code></pre>
*
* @return an {@code Integer} object holding the int value represented by this {@code CharSeq}
* @throws NumberFormatException if this {@code CharSeq} does not contain a parsable int.
*/
public Integer decodeInteger() {
return Integer.decode(back);
}
/**
* Decodes this {@code CharSeq} into a {@code Long} by calling {@link Long#decode(String)}.
* <p>
* We write
*
* <pre><code>
* Long value = charSeq.decodeLong();
* </code></pre>
*
* instead of
*
* <pre><code>
* Long value = Long.decode(charSeq.mkString());
* </code></pre>
*
* @return a {@code Long} object holding the long value represented by this {@code CharSeq}
* @throws NumberFormatException if this {@code CharSeq} does not contain a parsable long.
*/
public Long decodeLong() {
return Long.decode(back);
}
/**
* Decodes this {@code CharSeq} into a {@code Short} by calling {@link Short#decode(String)}.
* <p>
* We write
*
* <pre><code>
* Short value = charSeq.decodeShort();
* </code></pre>
*
* instead of
*
* <pre><code>
* Short value = Short.decode(charSeq.mkString());
* </code></pre>
*
* @return a {@code Short} object holding the short value represented by this {@code CharSeq}
* @throws NumberFormatException if this {@code CharSeq} does not contain a parsable short.
*/
public Short decodeShort() {
return Short.decode(back);
}
/**
* Parses this {@code CharSeq} as a boolean by calling {@link Boolean#parseBoolean(String)}.
* <p>
* We write
*
* <pre><code>
* boolean value = charSeq.parseBoolean();
* </code></pre>
*
* instead of
*
* <pre><code>
* boolean value = Boolean.parseBoolean(charSeq.mkString());
* </code></pre>
*
* @return the boolean represented by this {@code CharSeq}
*/
public boolean parseBoolean() {
return Boolean.parseBoolean(back);
}
/**
* Parses this {@code CharSeq} as a signed decimal byte by calling {@link Byte#parseByte(String)}.
* <p>
* We write
*
* <pre><code>
* byte value = charSeq.parseByte();
* </code></pre>
*
* instead of
*
* <pre><code>
* byte value = Byte.parseByte(charSeq.mkString());
* </code></pre>
*
* @return the byte value represented by this {@code CharSeq} in decimal
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable byte.
*/
public byte parseByte() {
return Byte.parseByte(back);
}
/**
* Parses this {@code CharSeq} as a signed byte in the specified radix
* by calling {@link Byte#parseByte(String, int)}.
* <p>
* We write
*
* <pre><code>
* byte value = charSeq.parseByte(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* byte value = Byte.parseByte(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this {@code CharSeq}
* @return the byte value represented by this {@code CharSeq} in the specified radix
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable byte.
*/
public byte parseByte(int radix) {
return Byte.parseByte(back, radix);
}
/**
* Parses this {@code CharSeq} as a double by calling {@link Double#parseDouble(String)}.
* <p>
* We write
*
* <pre><code>
* double value = charSeq.parseDouble();
* </code></pre>
*
* instead of
*
* <pre><code>
* double value = Double.parseDouble(charSeq.mkString());
* </code></pre>
*
* @return the double value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable double.
*/
public double parseDouble() {
return Double.parseDouble(back);
}
    /**
     * Parses this {@code CharSeq} as a float by calling {@link Float#parseFloat(String)}.
     * <p>
     * We write
     *
     * <pre><code>
     * float value = charSeq.parseFloat();
     * </code></pre>
     *
     * instead of
     *
     * <pre><code>
     * float value = Float.parseFloat(charSeq.mkString());
     * </code></pre>
     *
     * @return the float value represented by this {@code CharSeq}
     * @throws NumberFormatException If this {@code CharSeq} does not contain a parsable float.
     */
    public float parseFloat() {
        return Float.parseFloat(back);
    }
/**
* Parses this {@code CharSeq} as a signed decimal int by calling {@link Integer#parseInt(String)}.
* <p>
* We write
*
* <pre><code>
* int value = charSeq.parseInt();
* </code></pre>
*
* instead of
*
* <pre><code>
* int value = Integer.parseInt(charSeq.mkString());
* </code></pre>
*
* @return the int value represented by this {@code CharSeq} in decimal
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable int.
*/
public int parseInt() {
return Integer.parseInt(back);
}
/**
* Parses this {@code CharSeq} as a signed int in the specified radix
* by calling {@link Integer#parseInt(String, int)}.
* <p>
* We write
*
* <pre><code>
* int value = charSeq.parseInt(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* int value = Integer.parseInt(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this {@code CharSeq}
* @return the int value represented by this {@code CharSeq} in the specified radix
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable int.
*/
public int parseInt(int radix) {
return Integer.parseInt(back, radix);
}
/**
* Parses this {@code CharSeq} as a unsigned decimal int by calling {@link Integer#parseUnsignedInt(String)}.
* <p>
* We write
*
* <pre><code>
* int value = charSeq.parseUnsignedInt();
* </code></pre>
*
* instead of
*
* <pre><code>
* int value = Integer.parseUnsignedInt(charSeq.mkString());
* </code></pre>
*
* @return the unsigned int value represented by this {@code CharSeq} in decimal
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable unsigned int.
*/
public int parseUnsignedInt() {
return Integer.parseUnsignedInt(back);
}
/**
* Parses this {@code CharSeq} as a unsigned int in the specified radix
* by calling {@link Integer#parseUnsignedInt(String, int)}.
* <p>
* We write
*
* <pre><code>
* int value = charSeq.parseUnsignedInt(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* int value = Integer.parseUnsignedInt(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this {@code CharSeq}
* @return the unsigned int value represented by this {@code CharSeq} in the specified radix
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable unsigned int.
*/
public int parseUnsignedInt(int radix) {
return Integer.parseUnsignedInt(back, radix);
}
/**
* Parses this {@code CharSeq} as a signed decimal long by calling {@link Long#parseLong(String)}.
* <p>
* We write
*
* <pre><code>
* long value = charSeq.parseLong();
* </code></pre>
*
* instead of
*
* <pre><code>
* long value = Long.parseLong(charSeq.mkString());
* </code></pre>
*
* @return the long value represented by this {@code CharSeq} in decimal
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable long.
*/
public long parseLong() {
return Long.parseLong(back);
}
/**
* Parses this {@code CharSeq} as a signed long in the specified radix
* by calling {@link Long#parseLong(String, int)}.
* <p>
* We write
*
* <pre><code>
* long value = charSeq.parseLong(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* long value = Long.parseLong(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this {@code CharSeq}
* @return the long value represented by this {@code CharSeq} in the specified radix
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable long.
*/
public long parseLong(int radix) {
return Long.parseLong(back, radix);
}
/**
* Parses this {@code CharSeq} as a unsigned decimal long by calling {@link Long#parseUnsignedLong(String)}.
* <p>
* We write
*
* <pre><code>
* long value = charSeq.parseUnsignedLong();
* </code></pre>
*
* instead of
*
* <pre><code>
* long value = Long.parseUnsignedLong(charSeq.mkString());
* </code></pre>
*
* @return the unsigned long value represented by this {@code CharSeq} in decimal
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable unsigned long.
*/
public long parseUnsignedLong() {
return Long.parseUnsignedLong(back);
}
/**
* Parses this {@code CharSeq} as a unsigned long in the specified radix
* by calling {@link Long#parseUnsignedLong(String, int)}.
* <p>
* We write
*
* <pre><code>
* long value = charSeq.parseUnsignedLong(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* long value = Long.parseUnsignedLong(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this {@code CharSeq}
* @return the unsigned long value represented by this {@code CharSeq} in the specified radix
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable unsigned long.
*/
public long parseUnsignedLong(int radix) {
return Long.parseUnsignedLong(back, radix);
}
/**
* Parses this {@code CharSeq} as a signed decimal short by calling {@link Short#parseShort(String)}.
* <p>
* We write
*
* <pre><code>
* short value = charSeq.parseShort();
* </code></pre>
*
* instead of
*
* <pre><code>
* short value = Short.parseShort(charSeq.mkString());
* </code></pre>
*
* @return the short value represented by this {@code CharSeq} in decimal
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable short.
*/
public short parseShort() {
return Short.parseShort(back);
}
/**
* Parses this {@code CharSeq} as a signed short in the specified radix
* by calling {@link Short#parseShort(String, int)}.
* <p>
* We write
*
* <pre><code>
* short value = charSeq.parseShort(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* short value = Short.parseShort(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this {@code CharSeq}
* @return the short value represented by this {@code CharSeq} in the specified radix
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable short.
*/
public short parseShort(int radix) {
return Short.parseShort(back, radix);
}
/**
* Converts this {@code CharSeq} to a {@code Boolean} by calling {@link Boolean#valueOf(String)}.
* <p>
* We write
*
* <pre><code>
* Boolean value = charSeq.toBoolean();
* </code></pre>
*
* instead of
*
* <pre><code>
* Boolean value = Boolean.valueOf(charSeq.mkString());
* </code></pre>
*
* @return the {@code Boolean} value represented by this {@code CharSeq}
*/
public Boolean toBoolean() {
return Boolean.valueOf(back);
}
/**
* Converts this {@code CharSeq} to a {@code Byte} by calling {@link Byte#valueOf(String)}.
* <p>
* We write
*
* <pre><code>
* Byte value = charSeq.toByte();
* </code></pre>
*
* instead of
*
* <pre><code>
* Byte value = Byte.valueOf(charSeq.mkString());
* </code></pre>
*
* @return a {@code Byte} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable byte.
*/
public Byte toByte() {
return Byte.valueOf(back);
}
/**
* Converts this {@code CharSeq} to a {@code Byte} in the specified radix
* by calling {@link Byte#valueOf(String, int)}.
* <p>
* We write
*
* <pre><code>
* Byte value = charSeq.toByte(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* Byte value = Byte.valueOf(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this char sequence
* @return a {@code Byte} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable byte.
*/
public Byte toByte(int radix) {
return Byte.valueOf(back, radix);
}
/**
* Converts this {@code CharSeq} to a {@code Double} by calling {@link Double#valueOf(String)}.
* <p>
* We write
*
* <pre><code>
* Double value = charSeq.toDouble();
* </code></pre>
*
* instead of
*
* <pre><code>
* Double value = Double.valueOf(charSeq.mkString());
* </code></pre>
*
* @return a {@code Double} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable double.
*/
public Double toDouble() {
return Double.valueOf(back);
}
/**
* Converts this {@code CharSeq} to a {@code Float} by calling {@link Float#valueOf(String)}.
* <p>
* We write
*
* <pre><code>
* Float value = charSeq.toFloat();
* </code></pre>
*
* instead of
*
* <pre><code>
* Float value = Float.valueOf(charSeq.mkString());
* </code></pre>
*
* @return a {@code Float} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable float.
*/
public Float toFloat() {
return Float.valueOf(back);
}
/**
* Converts this {@code CharSeq} to an {@code Integer} by calling {@link Integer#valueOf(String)}.
* <p>
* We write
*
* <pre><code>
* Integer value = charSeq.toInteger();
* </code></pre>
*
* instead of
*
* <pre><code>
* Integer value = Integer.valueOf(charSeq.mkString());
* </code></pre>
*
* @return an {@code Integer} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable int.
*/
public Integer toInteger() {
return Integer.valueOf(back);
}
/**
* Converts this {@code CharSeq} to an {@code Integer} in the specified radix
* by calling {@link Integer#valueOf(String, int)}.
* <p>
* We write
*
* <pre><code>
* Integer value = charSeq.toInteger(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* Integer value = Integer.valueOf(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this char sequence
* @return an {@code Integer} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable int.
*/
public Integer toInteger(int radix) {
return Integer.valueOf(back, radix);
}
/**
* Converts this {@code CharSeq} to a {@code Long} by calling {@link Long#valueOf(String)}.
* <p>
* We write
*
* <pre><code>
* Long value = charSeq.toLong();
* </code></pre>
*
* instead of
*
* <pre><code>
* Long value = Long.valueOf(charSeq.mkString());
* </code></pre>
*
* @return a {@code Long} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable long.
*/
public Long toLong() {
return Long.valueOf(back);
}
/**
* Converts this {@code CharSeq} to a {@code Long} in the specified radix
* by calling {@link Long#valueOf(String, int)}.
* <p>
* We write
*
* <pre><code>
* Long value = charSeq.toLong(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* Long value = Long.valueOf(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this char sequence
* @return a {@code Long} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable long.
*/
public Long toLong(int radix) {
return Long.valueOf(back, radix);
}
/**
* Converts this {@code CharSeq} to a {@code Short} by calling {@link Short#valueOf(String)}.
* <p>
* We write
*
* <pre><code>
* Short value = charSeq.toShort();
* </code></pre>
*
* instead of
*
* <pre><code>
* Short value = Short.valueOf(charSeq.mkString());
* </code></pre>
*
* @return a {@code Short} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable short.
*/
public Short toShort() {
return Short.valueOf(back);
}
/**
* Converts this {@code CharSeq} to a {@code Short} in the specified radix
* by calling {@link Short#valueOf(String, int)}.
* <p>
* We write
*
* <pre><code>
* Short value = charSeq.toShort(radix);
* </code></pre>
*
* instead of
*
* <pre><code>
* Short value = Short.valueOf(charSeq.mkString(), radix);
* </code></pre>
*
* @param radix the radix to be used in interpreting this char sequence
* @return a {@code Short} object holding the value represented by this {@code CharSeq}
* @throws NumberFormatException If this {@code CharSeq} does not contain a parsable short.
*/
public Short toShort(int radix) {
return Short.valueOf(back, radix);
}
// -- conversion overrides
@Override
public Character[] toJavaArray() {
return toJavaList().toArray(new Character[0]);
}
// -- functional interfaces
@FunctionalInterface
public interface CharUnaryOperator {
char apply(char c);
}
@FunctionalInterface
public interface CharFunction<R> {
R apply(char c);
}
}
// Internal helpers for CharSeq; not part of the public API.
interface CharSeqModule {
    // Computes k-combinations of the characters of a CharSeq.
    interface Combinations {
        // Returns all length-k combinations of `elements`, preserving the relative
        // order of the characters. For k == 0 the result is the single empty CharSeq.
        // Otherwise each element t (a (char, index) pair from zipWithIndex) is chosen
        // as the first character, and the remaining k - 1 characters are drawn
        // recursively from the suffix strictly after index t._2 — so no character is
        // reused and no combination is produced twice.
        static IndexedSeq<CharSeq> apply(CharSeq elements, int k) {
            if (k == 0) {
                return Vector.of(CharSeq.empty());
            } else {
                return elements.zipWithIndex().flatMap(
                        t -> apply(elements.drop(t._2 + 1), (k - 1)).map((CharSeq c) -> c.prepend(t._1))
                );
            }
        }
    }
}
/*
* Copyright 2013 Google Inc.
* Copyright 2014 Andreas Schildbach
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bitcoinj.core;
import com.google.common.base.Throwables;
import com.google.common.collect.Maps;
import com.google.common.collect.Ordering;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.Runnables;
import com.google.common.util.concurrent.Uninterruptibles;
import net.jcip.annotations.GuardedBy;
import org.bitcoinj.base.Network;
import org.bitcoinj.base.internal.InternalUtils;
import org.bitcoinj.base.internal.PlatformUtils;
import org.bitcoinj.base.internal.Stopwatch;
import org.bitcoinj.base.internal.TimeUtils;
import org.bitcoinj.core.listeners.AddressEventListener;
import org.bitcoinj.core.listeners.BlockchainDownloadEventListener;
import org.bitcoinj.core.listeners.BlocksDownloadedEventListener;
import org.bitcoinj.core.listeners.ChainDownloadStartedEventListener;
import org.bitcoinj.core.listeners.DownloadProgressTracker;
import org.bitcoinj.core.listeners.GetDataEventListener;
import org.bitcoinj.core.listeners.OnTransactionBroadcastListener;
import org.bitcoinj.core.listeners.PeerConnectedEventListener;
import org.bitcoinj.core.listeners.PeerDisconnectedEventListener;
import org.bitcoinj.core.listeners.PeerDiscoveredEventListener;
import org.bitcoinj.core.listeners.PreMessageReceivedEventListener;
import org.bitcoinj.net.ClientConnectionManager;
import org.bitcoinj.net.FilterMerger;
import org.bitcoinj.net.NioClientManager;
import org.bitcoinj.net.discovery.MultiplexingDiscovery;
import org.bitcoinj.net.discovery.PeerDiscovery;
import org.bitcoinj.net.discovery.PeerDiscoveryException;
import org.bitcoinj.script.Script;
import org.bitcoinj.script.ScriptPattern;
import org.bitcoinj.utils.ContextPropagatingThreadFactory;
import org.bitcoinj.utils.ExponentialBackoff;
import org.bitcoinj.utils.ListenableCompletableFuture;
import org.bitcoinj.utils.ListenerRegistration;
import org.bitcoinj.utils.Threading;
import org.bitcoinj.wallet.Wallet;
import org.bitcoinj.wallet.listeners.KeyChainEventListener;
import org.bitcoinj.wallet.listeners.ScriptsChangeEventListener;
import org.bitcoinj.wallet.listeners.WalletCoinsReceivedEventListener;
import org.bitcoinj.wallet.listeners.WalletCoinsSentEventListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NoRouteToHostException;
import java.net.Socket;
import java.net.SocketAddress;
import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import static org.bitcoinj.base.internal.Preconditions.checkArgument;
import static org.bitcoinj.base.internal.Preconditions.checkState;
/**
* <p>Runs a set of connections to the P2P network, brings up connections to replace disconnected nodes and manages
* the interaction between them all. Most applications will want to use one of these.</p>
*
* <p>PeerGroup tries to maintain a constant number of connections to a set of distinct peers.
* Each peer runs a network listener in its own thread. When a connection is lost, a new peer
 * will be tried after a delay as long as the number of connections is less than the maximum.</p>
*
* <p>Connections are made to addresses from a provided list. When that list is exhausted,
* we start again from the head of the list.</p>
*
* <p>The PeerGroup can broadcast a transaction to the currently connected set of peers. It can
* also handle download of the blockchain from peers, restarting the process when peers die.</p>
*
* <p>A PeerGroup won't do anything until you call the {@link PeerGroup#start()} method
* which will block until peer discovery is completed and some outbound connections
* have been initiated (it will return before handshaking is done, however).
* You should call {@link PeerGroup#stop()} when finished. Note that not all methods
* of PeerGroup are safe to call from a UI thread as some may do network IO,
* but starting and stopping the service should be fine.</p>
*/
public class PeerGroup implements TransactionBroadcaster {
private static final Logger log = LoggerFactory.getLogger(PeerGroup.class);
protected final ReentrantLock lock = Threading.lock(PeerGroup.class);
// All members in this class should be marked with final, volatile, @GuardedBy or a mix as appropriate to define
// their thread safety semantics. Volatile requires a Hungarian-style v prefix.
// By default we don't require any services because any peer will do.
private long requiredServices = 0;
/**
* The default number of connections to the p2p network the library will try to build. This is set to 12 empirically.
* It used to be 4, but because we divide the connection pool in two for broadcasting transactions, that meant we
* were only sending transactions to two peers and sometimes this wasn't reliable enough: transactions wouldn't
* get through.
*/
public static final int DEFAULT_CONNECTIONS = 12;
private volatile int vMaxPeersToDiscoverCount = 100;
private static final Duration DEFAULT_PEER_DISCOVERY_TIMEOUT = Duration.ofSeconds(5);
private volatile Duration vPeerDiscoveryTimeout = DEFAULT_PEER_DISCOVERY_TIMEOUT;
protected final NetworkParameters params;
@Nullable protected final AbstractBlockChain chain;
// This executor is used to queue up jobs: it's used when we don't want to use locks for mutual exclusion,
// typically because the job might call in to user provided code that needs/wants the freedom to use the API
// however it wants, or because a job needs to be ordered relative to other jobs like that.
protected final ScheduledExecutorService executor;
// Whether the peer group is currently running. Once shut down it cannot be restarted.
private volatile boolean vRunning;
// Whether the peer group has been started or not. An unstarted PG does not try to access the network.
private volatile boolean vUsedUp;
// Addresses to try to connect to, excluding active peers.
@GuardedBy("lock") private final PriorityQueue<PeerAddress> inactives;
@GuardedBy("lock") private final Map<PeerAddress, ExponentialBackoff> backoffMap;
@GuardedBy("lock") private final Map<PeerAddress, Integer> priorityMap;
// Currently active peers. This is an ordered list rather than a set to make unit tests predictable.
private final CopyOnWriteArrayList<Peer> peers;
// Currently connecting peers.
private final CopyOnWriteArrayList<Peer> pendingPeers;
private final ClientConnectionManager channels;
// The peer that has been selected for the purposes of downloading announced data.
@GuardedBy("lock") private Peer downloadPeer;
// Callback for events related to chain download.
@Nullable @GuardedBy("lock") private BlockchainDownloadEventListener downloadListener;
private final CopyOnWriteArrayList<ListenerRegistration<BlocksDownloadedEventListener>> peersBlocksDownloadedEventListeners
= new CopyOnWriteArrayList<>();
private final CopyOnWriteArrayList<ListenerRegistration<ChainDownloadStartedEventListener>> peersChainDownloadStartedEventListeners
= new CopyOnWriteArrayList<>();
/** Callbacks for events related to peers connecting */
protected final CopyOnWriteArrayList<ListenerRegistration<PeerConnectedEventListener>> peerConnectedEventListeners
= new CopyOnWriteArrayList<>();
/** Callbacks for events related to peer connection/disconnection */
protected final CopyOnWriteArrayList<ListenerRegistration<PeerDiscoveredEventListener>> peerDiscoveredEventListeners
= new CopyOnWriteArrayList<>();
/** Callbacks for events related to peers disconnecting */
protected final CopyOnWriteArrayList<ListenerRegistration<PeerDisconnectedEventListener>> peerDisconnectedEventListeners
= new CopyOnWriteArrayList<>();
/** Callbacks for events related to peer data being received */
private final CopyOnWriteArrayList<ListenerRegistration<GetDataEventListener>> peerGetDataEventListeners
= new CopyOnWriteArrayList<>();
private final CopyOnWriteArrayList<ListenerRegistration<PreMessageReceivedEventListener>> peersPreMessageReceivedEventListeners
= new CopyOnWriteArrayList<>();
protected final CopyOnWriteArrayList<ListenerRegistration<OnTransactionBroadcastListener>> peersTransactionBroadastEventListeners
= new CopyOnWriteArrayList<>();
// Discover peers via addr and addrv2 messages?
private volatile boolean vDiscoverPeersViaP2P = false;
// Peer discovery sources, will be polled occasionally if there aren't enough inactives.
private final CopyOnWriteArraySet<PeerDiscovery> peerDiscoverers;
// The version message to use for new connections.
@GuardedBy("lock") private VersionMessage versionMessage;
// Maximum depth up to which pending transaction dependencies are downloaded, or 0 for disabled.
@GuardedBy("lock") private int downloadTxDependencyDepth;
// How many connections we want to have open at the current time. If we lose connections, we'll try opening more
// until we reach this count.
@GuardedBy("lock") private int maxConnections;
// Minimum protocol version we will allow ourselves to connect to: require Bloom filtering.
private volatile int vMinRequiredProtocolVersion;
/** How many milliseconds to wait after receiving a pong before sending another ping. */
public static final long DEFAULT_PING_INTERVAL_MSEC = 2000;
@GuardedBy("lock") private long pingIntervalMsec = DEFAULT_PING_INTERVAL_MSEC;
@GuardedBy("lock") private boolean useLocalhostPeerWhenPossible = true;
@GuardedBy("lock") private boolean ipv6Unreachable = false;
@GuardedBy("lock") private Instant fastCatchupTime;
private final CopyOnWriteArrayList<Wallet> wallets;
private final CopyOnWriteArrayList<PeerFilterProvider> peerFilterProviders;
// This event listener is added to every peer. It's here so when we announce transactions via an "inv", every
// peer can fetch them.
private final PeerListener peerListener = new PeerListener();
private int minBroadcastConnections = 0;
private final ScriptsChangeEventListener walletScriptsEventListener = (wallet, scripts, isAddingScripts) -> recalculateFastCatchupAndFilter(FilterRecalculateMode.SEND_IF_CHANGED);
private final KeyChainEventListener walletKeyEventListener = keys -> recalculateFastCatchupAndFilter(FilterRecalculateMode.SEND_IF_CHANGED);
private final WalletCoinsReceivedEventListener walletCoinsReceivedEventListener = (wallet, tx, prevBalance, newBalance) -> onCoinsReceivedOrSent(wallet, tx);
private final WalletCoinsSentEventListener walletCoinsSentEventListener = (wallet, tx, prevBalance, newBalance) -> onCoinsReceivedOrSent(wallet, tx);
public static final int MAX_ADDRESSES_PER_ADDR_MESSAGE = 16;
private void onCoinsReceivedOrSent(Wallet wallet, Transaction tx) {
// We received a relevant transaction. We MAY need to recalculate and resend the Bloom filter, but only
// if we have received a transaction that includes a relevant P2PK or P2WPKH output.
//
// The reason is that P2PK and P2WPKH outputs, when spent, will not repeat any data we can predict in their
// inputs. So a remote peer will update the Bloom filter for us when such an output is seen matching the
// existing filter, so that it includes the tx hash in which the P2PK/P2WPKH output was observed. Thus
// the spending transaction will always match (due to the outpoint structure).
//
// Unfortunately, whilst this is required for correct sync of the chain in blocks, there are two edge cases.
//
// (1) If a wallet receives a relevant, confirmed P2PK/P2WPKH output that was not broadcast across the network,
// for example in a coinbase transaction, then the node that's serving us the chain will update its filter
// but the rest will not. If another transaction then spends it, the other nodes won't match/relay it.
//
// (2) If we receive a P2PK/P2WPKH output broadcast across the network, all currently connected nodes will see
// it and update their filter themselves, but any newly connected nodes will receive the last filter we
// calculated, which would not include this transaction.
//
// For this reason we check if the transaction contained any relevant P2PKs or P2WPKHs and force a recalc
// and possibly retransmit if so. The recalculation process will end up including the tx hash into the
// filter. In case (1), we need to retransmit the filter to the connected peers. In case (2), we don't
// and shouldn't, we should just recalculate and cache the new filter for next time.
for (TransactionOutput output : tx.getOutputs()) {
Script scriptPubKey = output.getScriptPubKey();
if (ScriptPattern.isP2PK(scriptPubKey) || ScriptPattern.isP2WPKH(scriptPubKey)) {
if (output.isMine(wallet)) {
if (tx.getConfidence().getConfidenceType() == TransactionConfidence.ConfidenceType.BUILDING)
recalculateFastCatchupAndFilter(FilterRecalculateMode.SEND_IF_CHANGED);
else
recalculateFastCatchupAndFilter(FilterRecalculateMode.DONT_SEND);
return;
}
}
}
}
// Exponential backoff for peers starts at 1 second and maxes at 10 minutes.
private final ExponentialBackoff.Params peerBackoffParams = new ExponentialBackoff.Params(Duration.ofSeconds(1),
1.5f, Duration.ofMinutes(10));
// Tracks failures globally in case of a network failure.
@GuardedBy("lock") private ExponentialBackoff groupBackoff = new ExponentialBackoff(new ExponentialBackoff.Params(Duration.ofSeconds(1), 1.5f, Duration.ofSeconds(10)));
// This is a synchronized set, so it locks on itself. We use it to prevent TransactionBroadcast objects from
// being garbage collected if nothing in the apps code holds on to them transitively. See the discussion
// in broadcastTransaction.
private final Set<TransactionBroadcast> runningBroadcasts;
private class PeerListener implements GetDataEventListener, BlocksDownloadedEventListener, AddressEventListener {
public PeerListener() {
}
@Override
public List<Message> getData(Peer peer, GetDataMessage m) {
return handleGetData(m);
}
@Override
public void onBlocksDownloaded(Peer peer, Block block, @Nullable FilteredBlock filteredBlock, int blocksLeft) {
if (chain == null) return;
final double rate = chain.getFalsePositiveRate();
final double target = bloomFilterMerger.getBloomFilterFPRate() * MAX_FP_RATE_INCREASE;
if (rate > target) {
// TODO: Avoid hitting this path if the remote peer didn't acknowledge applying a new filter yet.
log.info("Force update Bloom filter due to high false positive rate ({} vs {})", rate, target);
recalculateFastCatchupAndFilter(FilterRecalculateMode.FORCE_SEND_FOR_REFRESH);
}
}
/**
* Called when a peer receives an {@code addr} or {@code addrv2} message, usually in response to a
* {@code getaddr} message.
*
* @param peer the peer that received the addr or addrv2 message
* @param message the addr or addrv2 message that was received
*/
@Override
public void onAddr(Peer peer, AddressMessage message) {
if (!vDiscoverPeersViaP2P)
return;
List<PeerAddress> addresses = new LinkedList<>(message.getAddresses());
// Make sure we pick random addresses.
Collections.shuffle(addresses);
int numAdded = 0;
for (PeerAddress address : addresses) {
if (!address.getServices().has(requiredServices))
continue;
// Add to inactive pool with slightly elevated priority because services fit.
boolean added = addInactive(address, 1);
if (added)
numAdded++;
// Limit addresses picked per message.
if (numAdded >= MAX_ADDRESSES_PER_ADDR_MESSAGE)
break;
}
log.info("{} gossiped {} addresses, added {} of them to the inactive pool", peer.getAddress(),
addresses.size(), numAdded);
}
}
    // Forwards peer connect/disconnect events to handleNewPeer / handlePeerDeath so the
    // group can update its state when a connection is established or lost.
    private class PeerStartupListener implements PeerConnectedEventListener, PeerDisconnectedEventListener {
        @Override
        public void onPeerConnected(Peer peer, int peerCount) {
            handleNewPeer(peer);
        }
        @Override
        public void onPeerDisconnected(Peer peer, int peerCount) {
            // The channel will be automatically removed from channels.
            handlePeerDeath(peer, null);
        }
    }
private final PeerStartupListener startupListener = new PeerStartupListener();
/**
 * The default Bloom filter false positive rate, which is selected to be extremely low such that you hardly ever
 * download false positives. This provides maximum performance. Although this default can be overridden to push
 * the FP rate higher, due to <a href="https://groups.google.com/forum/#!msg/bitcoinj/Ys13qkTwcNg/9qxnhwnkeoIJ">
 * various complexities</a> there are still ways a remote peer can deanonymize the users wallet. This is why the
 * FP rate is chosen for performance rather than privacy. If a future version of bitcoinj fixes the known
 * de-anonymization attacks this FP rate may rise again (or more likely, become expressed as a bandwidth allowance).
 */
public static final double DEFAULT_BLOOM_FILTER_FP_RATE = 0.00001;
/** Maximum increase in FP rate before forced refresh of the bloom filter */
public static final double MAX_FP_RATE_INCREASE = 10.0; // was a float literal (10.0f); use a double literal for a double constant
// An object that calculates bloom filters given a list of filter providers, whilst tracking some state useful
// for privacy purposes.
private final FilterMerger bloomFilterMerger;
/** The default timeout between when a connection attempt begins and version message exchange completes */
public static final Duration DEFAULT_CONNECT_TIMEOUT = Duration.ofSeconds(5);
private volatile Duration vConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
/** Whether bloom filter support is enabled when using a non FullPrunedBlockchain*/
private volatile boolean vBloomFilteringEnabled = true;
/**
 * Creates a PeerGroup for the given network. No chain is provided so this node will report its chain height
 * as zero to other peers. This constructor is useful if you just want to explore the network but aren't interested
 * in downloading block data.
 * @param network the P2P network to connect to
 */
public PeerGroup(Network network) {
// Delegate to the chain-aware constructor with no chain attached.
this(network, null);
}
/**
 * Creates a PeerGroup with the given network. No chain is provided so this node will report its chain height
 * as zero to other peers. This constructor is useful if you just want to explore the network but aren't interested
 * in downloading block data.
 * @param params network parameters identifying the P2P network to connect to
 * @deprecated Use {@link #PeerGroup(Network)}
 */
@Deprecated
public PeerGroup(NetworkParameters params) {
this(params.network());
}
/**
 * Creates a PeerGroup for the given network and chain. Blocks will be passed to the chain as they are broadcast
 * and downloaded. This is probably the constructor you want to use.
 * @param network the P2P network to connect to
 * @param chain used to process blocks, or null if block data is not wanted
 */
public PeerGroup(Network network, @Nullable AbstractBlockChain chain) {
// Default to an NIO-based connection manager.
this(network, chain, new NioClientManager());
}
/**
 * Creates a PeerGroup for the given network and chain. Blocks will be passed to the chain as they are broadcast
 * and downloaded.
 * @param params network parameters identifying the P2P network to connect to
 * @param chain used to process blocks, or null if block data is not wanted
 * @deprecated Use {@link PeerGroup#PeerGroup(Network, AbstractBlockChain)}
 */
@Deprecated
public PeerGroup(NetworkParameters params, @Nullable AbstractBlockChain chain) {
this(params.network(), chain);
}
/**
 * Create a PeerGroup for the given network, chain and connection manager.
 * @param network the P2P network to connect to
 * @param chain used to process blocks, or null if block data is not wanted
 * @param connectionManager used to create new connections and keep track of existing ones.
 */
protected PeerGroup(Network network, @Nullable AbstractBlockChain chain, ClientConnectionManager connectionManager) {
// Use the default (very low) Bloom filter FP rate unless a subclass overrides it.
this(NetworkParameters.of(Objects.requireNonNull(network)), chain, connectionManager, DEFAULT_BLOOM_FILTER_FP_RATE);
}
/**
 * Create a PeerGroup for the given network, chain and connection manager.
 * @param network the P2P network to connect to
 * @param chain used to process blocks, or null if block data is not wanted
 * @param connectionManager used to create new connections and keep track of existing ones.
 * @param bloomFilterFpRate false positive rate for bloom filters
 */
protected PeerGroup(Network network, @Nullable AbstractBlockChain chain, ClientConnectionManager connectionManager, double bloomFilterFpRate) {
this(NetworkParameters.of(Objects.requireNonNull(network)), chain, connectionManager, bloomFilterFpRate);
}
/**
 * Create a PeerGroup for the given network, chain and connection manager. This is the designated constructor:
 * all other constructors delegate here.
 * @param params network parameters identifying the P2P network to connect to
 * @param chain used to process blocks, or null if block data is not wanted
 * @param connectionManager used to create new connections and keep track of existing ones.
 * @param bloomFilterFpRate false positive rate for bloom filters
 */
// For testing only
protected PeerGroup(NetworkParameters params, @Nullable AbstractBlockChain chain, ClientConnectionManager connectionManager, double bloomFilterFpRate) {
Objects.requireNonNull(params);
Context.getOrCreate(); // create a context for convenience
this.params = params;
this.chain = chain;
// Until a wallet is attached, fast catchup starts at the genesis block.
fastCatchupTime = params.getGenesisBlock().time();
wallets = new CopyOnWriteArrayList<>();
peerFilterProviders = new CopyOnWriteArrayList<>();
executor = createPrivateExecutor();
// This default sentinel value will be overridden by one of two actions:
// - adding a peer discovery source sets it to the default
// - using connectTo() will increment it by one
maxConnections = 0;
int height = chain == null ? 0 : chain.getBestChainHeight();
versionMessage = new VersionMessage(params, height);
// We never request that the remote node wait for a bloom filter yet, as we have no wallets
versionMessage.relayTxesBeforeFilter = true;
downloadTxDependencyDepth = Integer.MAX_VALUE;
// Queue ordered by backoff retry time, then priority, then port (the port tiebreak keeps tests deterministic).
inactives = new PriorityQueue<>(1, new Comparator<PeerAddress>() {
@SuppressWarnings("FieldAccessNotGuarded") // only called when inactives is accessed, and lock is held then.
@Override
public int compare(PeerAddress a, PeerAddress b) {
checkState(lock.isHeldByCurrentThread());
int result = backoffMap.get(a).compareTo(backoffMap.get(b));
if (result != 0)
return result;
result = Integer.compare(getPriority(a), getPriority(b));
if (result != 0)
return result;
// Sort by port if otherwise equals - for testing
result = Integer.compare(a.getPort(), b.getPort());
return result;
}
});
backoffMap = new HashMap<>();
priorityMap = new ConcurrentHashMap<>();
peers = new CopyOnWriteArrayList<>();
pendingPeers = new CopyOnWriteArrayList<>();
channels = connectionManager;
peerDiscoverers = new CopyOnWriteArraySet<>();
runningBroadcasts = Collections.synchronizedSet(new HashSet<TransactionBroadcast>());
bloomFilterMerger = new FilterMerger(bloomFilterFpRate);
// We require Bloom-filter capable peers by default (BIP 37).
vMinRequiredProtocolVersion = ProtocolVersion.BLOOM_FILTER.intValue();
}
// Released by startAsync(); until then the private executor is deliberately blocked.
// Never reassigned, so it can (and should) be final for safe publication.
private final CountDownLatch executorStartupLatch = new CountDownLatch(1);

/**
 * Creates the single-threaded scheduled executor used for all internal PeerGroup jobs.
 * The first task submitted blocks on {@code executorStartupLatch}, so any work queued
 * before {@link #startAsync()} simply piles up until the group is actually started.
 *
 * @return a single-threaded scheduled executor, initially jammed until startup
 */
protected ScheduledExecutorService createPrivateExecutor() {
    ScheduledExecutorService result =
            new ScheduledThreadPoolExecutor(1, new ContextPropagatingThreadFactory("PeerGroup Thread"));
    // Hack: jam the executor so jobs just queue up until the user calls start() on us. For example, adding a wallet
    // results in a bloom filter recalc being queued, but we don't want to do that until we're actually started.
    result.execute(() -> Uninterruptibles.awaitUninterruptibly(executorStartupLatch));
    return result;
}
/**
 * This is how long we wait for peer discoveries to return their results.
 * @param peerDiscoveryTimeout timeout applied to each peer discovery attempt
 */
public void setPeerDiscoveryTimeout(Duration peerDiscoveryTimeout) {
// Volatile write: read by the discovery job without holding the lock.
this.vPeerDiscoveryTimeout = peerDiscoveryTimeout;
}
/**
 * This is how many milliseconds we wait for peer discoveries to return their results.
 * @param peerDiscoveryTimeoutMillis timeout in milliseconds
 * @deprecated use {@link #setPeerDiscoveryTimeout(Duration)}
 */
@Deprecated
public void setPeerDiscoveryTimeoutMillis(long peerDiscoveryTimeoutMillis) {
setPeerDiscoveryTimeout(Duration.ofMillis(peerDiscoveryTimeoutMillis));
}
/**
 * Adjusts the desired number of connections that we will create to peers. Note that if there are already peers
 * open and the new value is lower than the current number of peers, those connections will be terminated. Likewise
 * if there aren't enough current connections to meet the new requested max size, some will be added.
 * @param maxConnections the new target number of peer connections
 */
public void setMaxConnections(int maxConnections) {
int adjustment;
lock.lock();
try {
this.maxConnections = maxConnections;
// If the group isn't running yet, the target takes effect when it starts.
if (!isRunning()) return;
} finally {
lock.unlock();
}
// We may now have too many or too few open connections. Add more or drop some to get to the right amount.
// Note: done outside the lock since triggerConnections()/closeConnections() may be slow.
adjustment = maxConnections - channels.getConnectedClientCount();
if (adjustment > 0)
triggerConnections();
if (adjustment < 0)
channels.closeConnections(-adjustment);
}
/**
 * Configure download of pending transaction dependencies. A change of values only takes effect for newly connected
 * peers.
 * @param depth maximum depth of dependency chains to download (0 disables)
 */
public void setDownloadTxDependencies(int depth) {
lock.lock();
try {
this.downloadTxDependencyDepth = depth;
} finally {
lock.unlock();
}
}
// Background job that tops up connections: tries a localhost node on the first run, otherwise
// picks the next inactive address (respecting exponential backoff), running peer discovery when
// the queue has nothing ready, and reschedules itself until the connection target is met.
private Runnable triggerConnectionsJob = new Runnable() {
    private boolean firstRun = true;
    private final Duration MIN_PEER_DISCOVERY_INTERVAL = Duration.ofSeconds(1);

    @Override
    public void run() {
        try {
            go();
        } catch (Throwable e) {
            log.error("Exception when trying to build connections", e); // The executor swallows exceptions :(
        }
    }

    public void go() {
        if (!vRunning) return;
        boolean doDiscovery = false;
        Instant now = TimeUtils.currentTime();
        lock.lock();
        try {
            // First run: try and use a local node if there is one, for the additional security it can provide.
            // But, not on Android as there are none for this platform: it could only be a malicious app trying
            // to hijack our traffic.
            if (!PlatformUtils.isAndroidRuntime() && useLocalhostPeerWhenPossible && maybeCheckForLocalhostPeer() && firstRun) {
                log.info("Localhost peer detected, trying to use it instead of P2P discovery");
                maxConnections = 0;
                connectToLocalHost();
                return;
            }
            // Discovery is needed only if no queued address is past its backoff retry time yet.
            boolean havePeerWeCanTry = !inactives.isEmpty() && backoffMap.get(inactives.peek()).retryTime().isBefore(now);
            doDiscovery = !havePeerWeCanTry;
        } finally {
            firstRun = false;
            lock.unlock();
        }
        // Don't hold the lock across discovery as this process can be very slow.
        boolean discoverySuccess = false;
        if (doDiscovery) {
            discoverySuccess = discoverPeers() > 0;
        }
        lock.lock();
        try {
            if (doDiscovery) {
                // Require that we have enough connections, to consider this
                // a success, or we just constantly test for new peers
                if (discoverySuccess && countConnectedAndPendingPeers() >= getMaxConnections()) {
                    groupBackoff.trackSuccess();
                } else {
                    groupBackoff.trackFailure();
                }
            }
            // Inactives is sorted by backoffMap time.
            if (inactives.isEmpty()) {
                if (countConnectedAndPendingPeers() < getMaxConnections()) {
                    Duration interval = TimeUtils.longest(Duration.between(now, groupBackoff.retryTime()), MIN_PEER_DISCOVERY_INTERVAL);
                    log.info("Peer discovery didn't provide us any more peers, will try again in "
                            + interval.toMillis() + " ms.");
                    executor.schedule(this, interval.toMillis(), TimeUnit.MILLISECONDS);
                } else {
                    // We have enough peers and discovery provided no more, so just settle down. Most likely we
                    // were given a fixed set of addresses in some test scenario.
                }
                return;
            }
            PeerAddress addrToTry;
            do {
                addrToTry = inactives.poll();
                // Bugfix: check for null before dereferencing. When ipv6Unreachable is set, this loop can
                // drain the queue entirely (all remaining entries are IPv6); poll() then returns null and
                // the previous code threw an NPE here instead of falling through to the null check below.
            } while (ipv6Unreachable && addrToTry != null && addrToTry.getAddr() instanceof Inet6Address);
            if (addrToTry == null) {
                // We have exhausted the queue of reachable peers, so just settle down.
                // Most likely we were given a fixed set of addresses in some test scenario.
                return;
            }
            // Honour both the per-address backoff and the group-wide backoff, whichever is later.
            Instant retryTime = backoffMap.get(addrToTry).retryTime();
            retryTime = TimeUtils.later(retryTime, groupBackoff.retryTime());
            if (retryTime.isAfter(now)) {
                Duration delay = Duration.between(now, retryTime);
                log.info("Waiting {} ms before next connect attempt to {}", delay.toMillis(), addrToTry);
                inactives.add(addrToTry);
                executor.schedule(this, delay.toMillis(), TimeUnit.MILLISECONDS);
                return;
            }
            connectTo(addrToTry, false, vConnectTimeout);
        } finally {
            lock.unlock();
        }
        if (countConnectedAndPendingPeers() < getMaxConnections()) {
            executor.execute(this); // Try next peer immediately.
        }
    }
};
// Kicks off the connection top-up job on the private executor.
private void triggerConnections() {
// Run on a background thread due to the need to potentially retry and back off in the background.
// Skipped after stopAsync() has shut the executor down.
if (!executor.isShutdown())
executor.execute(triggerConnectionsJob);
}
/**
 * The maximum number of connections that we will create to peers.
 *
 * @return the current connection target
 */
public int getMaxConnections() {
    lock.lock();
    try {
        int target = maxConnections;
        return target;
    } finally {
        lock.unlock();
    }
}
// Scans the wallets for transactions named in the getdata message and returns those we have.
// Items with no match in any wallet are simply ignored. Runs on peer threads.
private List<Message> handleGetData(GetDataMessage m) {
    lock.lock();
    try {
        List<Message> found = new LinkedList<>();
        for (InventoryItem item : m.getItems()) {
            // First wallet holding the transaction wins; no need to look further.
            for (Wallet w : wallets) {
                Transaction tx = w.getTransaction(item.hash);
                if (tx != null) {
                    found.add(tx);
                    break;
                }
            }
        }
        return found;
    } finally {
        lock.unlock();
    }
}
/**
 * Sets the {@link VersionMessage} that will be announced on newly created connections. A version message is
 * primarily interesting because it lets you customize the "subVer" field which is used a bit like the User-Agent
 * field from HTTP. It means your client tells the other side what it is, see
 * <a href="https://github.com/bitcoin/bips/blob/master/bip-0014.mediawiki">BIP 14</a>.
 *
 * The VersionMessage you provide is copied and the best chain height/time filled in for each new connection,
 * therefore you don't have to worry about setting that. The provided object is really more of a template.
 * @param ver template version message for new connections
 */
public void setVersionMessage(VersionMessage ver) {
lock.lock();
try {
versionMessage = ver;
} finally {
lock.unlock();
}
}
/**
 * Returns the version message provided by setVersionMessage or a default if none was given.
 * @return the template version message announced to new peers
 */
public VersionMessage getVersionMessage() {
lock.lock();
try {
return versionMessage;
} finally {
lock.unlock();
}
}
/**
 * Sets information that identifies this software to remote nodes. This is a convenience wrapper for creating
 * a new {@link VersionMessage}, calling {@link VersionMessage#appendToSubVer(String, String, String)} on it,
 * and then calling {@link PeerGroup#setVersionMessage(VersionMessage)} on the result of that. See the docs for
 * {@link VersionMessage#appendToSubVer(String, String, String)} for information on what the fields should contain.
 *
 * @param name client name for the subVer field
 * @param version client version for the subVer field
 * @param comments optional free-form comments, or null
 */
public void setUserAgent(String name, String version, @Nullable String comments) {
    // TODO Check that height is needed here (it wasn't, but it should be, no?)
    int height = chain == null ? 0 : chain.getBestChainHeight();
    VersionMessage ver = new VersionMessage(params, height);
    // relayTxesBeforeFilter is set by updateVersionMessageRelayTxesBeforeFilter based on current state;
    // the previous unconditional assignment to false here was a dead store and has been removed.
    updateVersionMessageRelayTxesBeforeFilter(ver);
    ver.appendToSubVer(name, version, comments);
    setVersionMessage(ver);
}
// Updates the relayTxesBeforeFilter flag of ver: the remote node should hold back transaction
// relaying exactly when we are going to send it a Bloom filter first.
private void updateVersionMessageRelayTxesBeforeFilter(VersionMessage ver) {
    // We will provide the remote node with a bloom filter (ie they shouldn't relay yet)
    // if chain == null || !chain.shouldVerifyTransactions() and a wallet is added and bloom filters are enabled
    // Note that the default here means that no tx invs will be received if no wallet is ever added
    lock.lock();
    try {
        boolean spvMode = chain != null && !chain.shouldVerifyTransactions();
        // Idiom: isEmpty() instead of size() > 0.
        boolean willSendFilter = spvMode && !peerFilterProviders.isEmpty() && vBloomFilteringEnabled;
        ver.relayTxesBeforeFilter = !willSendFilter;
    } finally {
        lock.unlock();
    }
}
/**
 * Sets information that identifies this software to remote nodes. This is a convenience wrapper for creating
 * a new {@link VersionMessage}, calling {@link VersionMessage#appendToSubVer(String, String, String)} on it,
 * and then calling {@link PeerGroup#setVersionMessage(VersionMessage)} on the result of that. See the docs for
 * {@link VersionMessage#appendToSubVer(String, String, String)} for information on what the fields should contain.
 * @param name client name for the subVer field
 * @param version client version for the subVer field
 */
public void setUserAgent(String name, String version) {
setUserAgent(name, version, null);
}
/** See {@link Peer#addBlocksDownloadedEventListener(BlocksDownloadedEventListener)} */
public void addBlocksDownloadedEventListener(BlocksDownloadedEventListener listener) {
// Events are delivered on the user thread by default.
addBlocksDownloadedEventListener(Threading.USER_THREAD, listener);
}
/**
 * <p>Adds a listener that will be notified on the given executor when
 * blocks are downloaded by the download peer.</p>
 * @see Peer#addBlocksDownloadedEventListener(Executor, BlocksDownloadedEventListener)
 */
public void addBlocksDownloadedEventListener(Executor executor, BlocksDownloadedEventListener listener) {
// Register on the group (for future peers) and on every existing connected/pending peer.
peersBlocksDownloadedEventListeners.add(new ListenerRegistration<>(Objects.requireNonNull(listener), executor));
for (Peer peer : getConnectedPeers())
peer.addBlocksDownloadedEventListener(executor, listener);
for (Peer peer : getPendingPeers())
peer.addBlocksDownloadedEventListener(executor, listener);
}
/** See {@link Peer#addChainDownloadStartedEventListener(ChainDownloadStartedEventListener)} */
public void addChainDownloadStartedEventListener(ChainDownloadStartedEventListener listener) {
// Events are delivered on the user thread by default.
addChainDownloadStartedEventListener(Threading.USER_THREAD, listener);
}
/**
 * <p>Adds a listener that will be notified on the given executor when
 * chain download starts.</p>
 */
public void addChainDownloadStartedEventListener(Executor executor, ChainDownloadStartedEventListener listener) {
// Register on the group (for future peers) and on every existing connected/pending peer.
peersChainDownloadStartedEventListeners.add(new ListenerRegistration<>(Objects.requireNonNull(listener), executor));
for (Peer peer : getConnectedPeers())
peer.addChainDownloadStartedEventListener(executor, listener);
for (Peer peer : getPendingPeers())
peer.addChainDownloadStartedEventListener(executor, listener);
}
/** See {@link Peer#addConnectedEventListener(PeerConnectedEventListener)} */
public void addConnectedEventListener(PeerConnectedEventListener listener) {
// Events are delivered on the user thread by default.
addConnectedEventListener(Threading.USER_THREAD, listener);
}
/**
 * <p>Adds a listener that will be notified on the given executor when
 * new peers are connected to.</p>
 */
public void addConnectedEventListener(Executor executor, PeerConnectedEventListener listener) {
// Register on the group (for future peers) and on every existing connected/pending peer.
peerConnectedEventListeners.add(new ListenerRegistration<>(Objects.requireNonNull(listener), executor));
for (Peer peer : getConnectedPeers())
peer.addConnectedEventListener(executor, listener);
for (Peer peer : getPendingPeers())
peer.addConnectedEventListener(executor, listener);
}
/** See {@link Peer#addDisconnectedEventListener(PeerDisconnectedEventListener)} */
public void addDisconnectedEventListener(PeerDisconnectedEventListener listener) {
// Events are delivered on the user thread by default.
addDisconnectedEventListener(Threading.USER_THREAD, listener);
}
/**
 * <p>Adds a listener that will be notified on the given executor when
 * peers are disconnected from.</p>
 */
public void addDisconnectedEventListener(Executor executor, PeerDisconnectedEventListener listener) {
// Register on the group (for future peers) and on every existing connected/pending peer.
peerDisconnectedEventListeners.add(new ListenerRegistration<>(Objects.requireNonNull(listener), executor));
for (Peer peer : getConnectedPeers())
peer.addDisconnectedEventListener(executor, listener);
for (Peer peer : getPendingPeers())
peer.addDisconnectedEventListener(executor, listener);
}
/** See {@link PeerGroup#addDiscoveredEventListener(Executor, PeerDiscoveredEventListener)} */
public void addDiscoveredEventListener(PeerDiscoveredEventListener listener) {
// Events are delivered on the user thread by default.
addDiscoveredEventListener(Threading.USER_THREAD, listener);
}
/**
 * <p>Adds a listener that will be notified on the given executor when new
 * peers are discovered.</p>
 */
public void addDiscoveredEventListener(Executor executor, PeerDiscoveredEventListener listener) {
// Discovery is a group-level concept, so no per-peer registration is needed here.
peerDiscoveredEventListeners.add(new ListenerRegistration<>(Objects.requireNonNull(listener), executor));
}
/** See {@link Peer#addGetDataEventListener(GetDataEventListener)} */
public void addGetDataEventListener(GetDataEventListener listener) {
// Events are delivered on the user thread by default.
addGetDataEventListener(Threading.USER_THREAD, listener);
}
/** See {@link Peer#addGetDataEventListener(Executor, GetDataEventListener)} */
public void addGetDataEventListener(final Executor executor, final GetDataEventListener listener) {
// Register on the group (for future peers) and on every existing connected/pending peer.
peerGetDataEventListeners.add(new ListenerRegistration<>(Objects.requireNonNull(listener), executor));
for (Peer peer : getConnectedPeers())
peer.addGetDataEventListener(executor, listener);
for (Peer peer : getPendingPeers())
peer.addGetDataEventListener(executor, listener);
}
/** See {@link Peer#addOnTransactionBroadcastListener(OnTransactionBroadcastListener)} */
public void addOnTransactionBroadcastListener(OnTransactionBroadcastListener listener) {
// Events are delivered on the user thread by default.
addOnTransactionBroadcastListener(Threading.USER_THREAD, listener);
}
/** See {@link Peer#addOnTransactionBroadcastListener(Executor, OnTransactionBroadcastListener)} */
public void addOnTransactionBroadcastListener(Executor executor, OnTransactionBroadcastListener listener) {
// Register on the group (for future peers) and on every existing connected/pending peer.
peersTransactionBroadastEventListeners.add(new ListenerRegistration<>(Objects.requireNonNull(listener), executor));
for (Peer peer : getConnectedPeers())
peer.addOnTransactionBroadcastListener(executor, listener);
for (Peer peer : getPendingPeers())
peer.addOnTransactionBroadcastListener(executor, listener);
}
/** See {@link Peer#addPreMessageReceivedEventListener(PreMessageReceivedEventListener)} */
public void addPreMessageReceivedEventListener(PreMessageReceivedEventListener listener) {
// Events are delivered on the user thread by default.
addPreMessageReceivedEventListener(Threading.USER_THREAD, listener);
}
/** See {@link Peer#addPreMessageReceivedEventListener(Executor, PreMessageReceivedEventListener)} */
public void addPreMessageReceivedEventListener(Executor executor, PreMessageReceivedEventListener listener) {
// Register on the group (for future peers) and on every existing connected/pending peer.
peersPreMessageReceivedEventListeners.add(new ListenerRegistration<>(Objects.requireNonNull(listener), executor));
for (Peer peer : getConnectedPeers())
peer.addPreMessageReceivedEventListener(executor, listener);
for (Peer peer : getPendingPeers())
peer.addPreMessageReceivedEventListener(executor, listener);
}
/** The given event listener will no longer be called with events.
 * @return true if the listener was registered on the group */
public boolean removeBlocksDownloadedEventListener(BlocksDownloadedEventListener listener) {
boolean result = ListenerRegistration.removeFromList(listener, peersBlocksDownloadedEventListeners);
// Also unregister from every existing connected/pending peer.
for (Peer peer : getConnectedPeers())
peer.removeBlocksDownloadedEventListener(listener);
for (Peer peer : getPendingPeers())
peer.removeBlocksDownloadedEventListener(listener);
return result;
}
/** The given event listener will no longer be called with events.
 * @return true if the listener was registered on the group */
public boolean removeChainDownloadStartedEventListener(ChainDownloadStartedEventListener listener) {
boolean result = ListenerRegistration.removeFromList(listener, peersChainDownloadStartedEventListeners);
// Also unregister from every existing connected/pending peer.
for (Peer peer : getConnectedPeers())
peer.removeChainDownloadStartedEventListener(listener);
for (Peer peer : getPendingPeers())
peer.removeChainDownloadStartedEventListener(listener);
return result;
}
/** The given event listener will no longer be called with events. */
public boolean removeConnectedEventListener(PeerConnectedEventListener listener) {
boolean result = ListenerRegistration.removeFromList(listener, peerConnectedEventListeners);
// Also unregister from every existing connected/pending peer.
for (Peer peer : getConnectedPeers())
peer.removeConnectedEventListener(listener);
for (Peer peer : getPendingPeers())
peer.removeConnectedEventListener(listener);
return result;
}
/** The given event listener will no longer be called with events. */
public boolean removeDisconnectedEventListener(PeerDisconnectedEventListener listener) {
boolean result = ListenerRegistration.removeFromList(listener, peerDisconnectedEventListeners);
// Also unregister from every existing connected/pending peer.
for (Peer peer : getConnectedPeers())
peer.removeDisconnectedEventListener(listener);
for (Peer peer : getPendingPeers())
peer.removeDisconnectedEventListener(listener);
return result;
}
/** The given event listener will no longer be called with events. */
public boolean removeDiscoveredEventListener(PeerDiscoveredEventListener listener) {
    // Discovery listeners live only on the group (never on individual peers),
    // so there is nothing to unregister peer-by-peer.
    return ListenerRegistration.removeFromList(listener, peerDiscoveredEventListeners);
}
/** The given event listener will no longer be called with events. */
public boolean removeGetDataEventListener(GetDataEventListener listener) {
boolean result = ListenerRegistration.removeFromList(listener, peerGetDataEventListeners);
// Also unregister from every existing connected/pending peer.
for (Peer peer : getConnectedPeers())
peer.removeGetDataEventListener(listener);
for (Peer peer : getPendingPeers())
peer.removeGetDataEventListener(listener);
return result;
}
/** The given event listener will no longer be called with events. */
public boolean removeOnTransactionBroadcastListener(OnTransactionBroadcastListener listener) {
boolean result = ListenerRegistration.removeFromList(listener, peersTransactionBroadastEventListeners);
// Also unregister from every existing connected/pending peer.
for (Peer peer : getConnectedPeers())
peer.removeOnTransactionBroadcastListener(listener);
for (Peer peer : getPendingPeers())
peer.removeOnTransactionBroadcastListener(listener);
return result;
}
/** The given event listener will no longer be called with events.
 * @return true if the listener was registered on the group */
public boolean removePreMessageReceivedEventListener(PreMessageReceivedEventListener listener) {
boolean result = ListenerRegistration.removeFromList(listener, peersPreMessageReceivedEventListeners);
// Also unregister from every existing connected/pending peer.
for (Peer peer : getConnectedPeers())
peer.removePreMessageReceivedEventListener(listener);
for (Peer peer : getPendingPeers())
peer.removePreMessageReceivedEventListener(listener);
return result;
}
/**
 * Returns a newly allocated list containing the currently connected peers. If all you care about is the count,
 * use numConnectedPeers().
 *
 * @return a fresh snapshot of the connected peers
 */
public List<Peer> getConnectedPeers() {
    lock.lock();
    try {
        List<Peer> snapshot = new ArrayList<>(peers);
        return snapshot;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns a list containing Peers that did not complete connection yet.
 *
 * @return a fresh snapshot of the pending peers
 */
public List<Peer> getPendingPeers() {
    lock.lock();
    try {
        List<Peer> snapshot = new ArrayList<>(pendingPeers);
        return snapshot;
    } finally {
        lock.unlock();
    }
}
/**
 * Add an address to the list of potential peers to connect to. It won't necessarily be used unless there's a need
 * to build new connections to reach the max connection count.
 *
 * @param peerAddress IP/port to use.
 */
public void addAddress(PeerAddress peerAddress) {
// Default priority of 0 (no preference).
addAddress(peerAddress, 0);
}
/**
 * Add an address to the list of potential peers to connect to. It won't necessarily be used unless there's a need
 * to build new connections to reach the max connection count.
 *
 * @param peerAddress IP/port to use.
 * @param priority for connecting and being picked as a download peer
 */
public void addAddress(PeerAddress peerAddress, int priority) {
    lock.lock();
    try {
        // Only grow the connection target when the address is genuinely new; the
        // previously declared-but-sometimes-unused local has been inlined.
        if (addInactive(peerAddress, priority))
            setMaxConnections(getMaxConnections() + 1); // lock is reentrant, so this nested call is safe
    } finally {
        lock.unlock();
    }
}
// Adds peerAddress to backoffMap map and inactives queue.
// Returns true if it was added, false if it was already there.
private boolean addInactive(PeerAddress peerAddress, int priority) {
lock.lock();
try {
// Deduplicate
if (backoffMap.containsKey(peerAddress))
return false;
// Fresh addresses start with a clean exponential backoff state.
backoffMap.put(peerAddress, new ExponentialBackoff(peerBackoffParams));
// Priority 0 is the implicit default (see getPriority), so don't store it.
if (priority != 0)
priorityMap.put(peerAddress, priority);
inactives.offer(peerAddress);
return true;
} finally {
lock.unlock();
}
}
// Looks up the connection/download-peer priority recorded for this address;
// addresses without an explicit entry default to priority 0.
private int getPriority(PeerAddress peerAddress) {
    return priorityMap.getOrDefault(peerAddress, 0);
}
/**
 * Convenience for connecting only to peers that can serve specific services. It will configure suitable peer
 * discoveries.
 * @param requiredServices Required services as a bitmask, e.g. {@link Services#NODE_NETWORK}.
 */
public void setRequiredServices(long requiredServices) {
lock.lock();
try {
this.requiredServices = requiredServices;
// Replace any previously configured discoverers with a services-aware one.
peerDiscoverers.clear();
addPeerDiscovery(MultiplexingDiscovery.forServices(params.network(), requiredServices));
} finally {
lock.unlock();
}
}
/** Convenience method for {@link #addAddress(PeerAddress)}; uses the network's default port. */
public void addAddress(InetAddress address) {
addAddress(PeerAddress.simple(address, params.getPort()));
}
/** Convenience method for {@link #addAddress(PeerAddress, int)}; uses the network's default port. */
public void addAddress(InetAddress address, int priority) {
addAddress(PeerAddress.simple(address, params.getPort()), priority);
}
/**
 * Setting this to {@code true} will add addresses discovered via P2P {@code addr} and {@code addrv2} messages to
 * the list of potential peers to connect to. This will automatically be set to true if at least one peer discovery
 * is added via {@link #addPeerDiscovery(PeerDiscovery)}.
 *
 * @param discoverPeersViaP2P true if peers should be discovered from the P2P network
 */
public void setDiscoverPeersViaP2P(boolean discoverPeersViaP2P) {
// Volatile write: read by PeerListener.onAddr without holding the lock.
vDiscoverPeersViaP2P = discoverPeersViaP2P;
}
/**
 * Add addresses from a discovery source to the list of potential peers to connect to. If max connections has not
 * been configured, or set to zero, then it's set to the default at this point.
 * @param peerDiscovery source of candidate peer addresses
 */
public void addPeerDiscovery(PeerDiscovery peerDiscovery) {
lock.lock();
try {
// A zero target is the "not configured" sentinel set by the constructor.
if (getMaxConnections() == 0)
setMaxConnections(DEFAULT_CONNECTIONS);
peerDiscoverers.add(peerDiscovery);
} finally {
lock.unlock();
}
// Having any discovery source implies we also want P2P-gossiped addresses.
setDiscoverPeersViaP2P(true);
}
/** Runs all configured peer discoverers and feeds the results into the inactive pool.
 * @return number of discovered peer addresses */
protected int discoverPeers() {
// Don't hold the lock whilst doing peer discovery: it can take a long time and cause high API latency.
checkState(!lock.isHeldByCurrentThread());
// Snapshot the volatile config once so this run is internally consistent.
int maxPeersToDiscoverCount = this.vMaxPeersToDiscoverCount;
Duration peerDiscoveryTimeout = this.vPeerDiscoveryTimeout;
Stopwatch watch = Stopwatch.start();
final List<PeerAddress> addressList = new LinkedList<>();
for (PeerDiscovery peerDiscovery : peerDiscoverers /* COW */) {
List<InetSocketAddress> addresses;
try {
addresses = peerDiscovery.getPeers(requiredServices, peerDiscoveryTimeout);
} catch (PeerDiscoveryException e) {
// A failing discoverer shouldn't abort the whole run; try the next one.
log.warn(e.getMessage());
continue;
}
for (InetSocketAddress address : addresses) addressList.add(PeerAddress.simple(address));
if (addressList.size() >= maxPeersToDiscoverCount) break;
}
if (!addressList.isEmpty()) {
for (PeerAddress address : addressList) {
addInactive(address, 0);
}
// Notify discovery listeners with an immutable snapshot of what was found.
final Set<PeerAddress> peersDiscoveredSet = Collections.unmodifiableSet(new HashSet<>(addressList));
for (final ListenerRegistration<PeerDiscoveredEventListener> registration : peerDiscoveredEventListeners /* COW */) {
registration.executor.execute(() -> registration.listener.onPeersDiscovered(peersDiscoveredSet));
}
}
log.info("Peer discovery took {} and returned {} items from {} discoverers",
watch, addressList.size(), peerDiscoverers.size());
return addressList.size();
}
// For testing only
// Blocks until all jobs currently queued on the private executor have completed,
// by submitting a no-op and waiting for it to run.
void waitForJobQueue() {
Futures.getUnchecked(executor.submit(Runnables.doNothing()));
}
// Total number of peers we are either connected to or in the middle of connecting to.
private int countConnectedAndPendingPeers() {
    lock.lock();
    try {
        int connected = peers.size();
        int pending = pendingPeers.size();
        return connected + pending;
    } finally {
        lock.unlock();
    }
}
// Tracks whether we already probed for a node listening on localhost, so the
// (blocking) probe in maybeCheckForLocalhostPeer is attempted at most once.
private enum LocalhostCheckState {
NOT_TRIED,
FOUND,
FOUND_AND_CONNECTED,
NOT_THERE
}
private LocalhostCheckState localhostCheckState = LocalhostCheckState.NOT_TRIED;
// Probes (once) whether something is listening on the loopback address at the network's default
// port. Returns true only on the probe that finds a listener; later calls return false because
// the cached state is no longer NOT_TRIED. Caller must hold the lock.
private boolean maybeCheckForLocalhostPeer() {
checkState(lock.isHeldByCurrentThread());
if (localhostCheckState == LocalhostCheckState.NOT_TRIED) {
// Do a fast blocking connect to see if anything is listening.
try (Socket socket = new Socket()) {
socket.connect(new InetSocketAddress(InetAddress.getLoopbackAddress(), params.getPort()),
Math.toIntExact(vConnectTimeout.toMillis()));
localhostCheckState = LocalhostCheckState.FOUND;
return true;
} catch (IOException e) {
log.info("Localhost peer not detected.");
localhostCheckState = LocalhostCheckState.NOT_THERE;
}
}
return false;
}
/**
 * Starts the PeerGroup and begins network activity.
 * @return A future that completes when first connection activity has been triggered (note: not first connection made).
 */
public ListenableCompletableFuture<Void> startAsync() {
// This is run in a background thread by the Service implementation.
if (chain == null) {
// Just try to help catch what might be a programming error.
log.warn("Starting up with no attached block chain. Did you forget to pass one to the constructor?");
}
// A PeerGroup is single-use: once stopped it cannot be restarted.
checkState(!vUsedUp, () ->
"cannot start a peer group twice");
vRunning = true;
vUsedUp = true;
// Unjam the private executor (it was blocked on this latch since construction).
executorStartupLatch.countDown();
// We do blocking waits during startup, so run on the executor thread.
CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
try {
log.info("Starting ...");
channels.startAsync();
channels.awaitRunning();
triggerConnections();
setupPinging();
} catch (Throwable e) {
log.error("Exception when starting up", e); // The executor swallows exceptions :(
}
}, executor);
return ListenableCompletableFuture.of(future);
}
/** Does a blocking startup: kicks off {@link #startAsync()} and waits for it to complete. */
public void start() {
    ListenableCompletableFuture<Void> startup = startAsync();
    startup.join();
}
/**
 * Begins an asynchronous shutdown: closes all sockets, shuts down peer discoverers and the
 * internal executor. May only be called while the group is running.
 * @return a future completing once shutdown work has finished (or failed; errors are logged).
 */
public ListenableCompletableFuture<Void> stopAsync() {
    checkState(vRunning);
    vRunning = false;
    Runnable shutdown = () -> {
        try {
            log.info("Stopping ...");
            Stopwatch watch = Stopwatch.start();
            // The log output this creates can be useful.
            setDownloadPeer(null);
            // Blocking close of all sockets.
            channels.stopAsync();
            channels.awaitTerminated();
            for (PeerDiscovery peerDiscovery : peerDiscoverers) {
                peerDiscovery.shutdown();
            }
            vRunning = false;
            log.info("Stopped, took {}.", watch);
        } catch (Throwable e) {
            log.error("Exception when shutting down", e); // The executor swallows exceptions :(
        }
    };
    CompletableFuture<Void> future = CompletableFuture.runAsync(shutdown, executor);
    // No further jobs will be accepted; the shutdown job above is the executor's last task.
    executor.shutdown();
    return ListenableCompletableFuture.of(future);
}
/**
 * Does a blocking stop: triggers {@link #stopAsync()} and waits for the internal executor to
 * terminate.
 *
 * @throws RuntimeException wrapping an {@link InterruptedException} if the wait is interrupted;
 *         the thread's interrupt status is restored before throwing.
 */
public void stop() {
    try {
        Stopwatch watch = Stopwatch.start();
        stopAsync();
        log.info("Awaiting PeerGroup shutdown ...");
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
        log.info("... took {}", watch);
    } catch (InterruptedException e) {
        // Fix: the original swallowed the interrupt status; restore it so callers further up
        // the stack can observe the interruption (Java concurrency best practice).
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}
/**
 * Gracefully drops all connected peers by closing each connection in turn.
 */
public void dropAllPeers() {
    lock.lock();
    try {
        peers.forEach(Peer::close);
    } finally {
        lock.unlock();
    }
}
/**
 * <p>Link the given wallet to this PeerGroup. This is used for three purposes:</p>
 *
 * <ol>
 *   <li>So the wallet receives broadcast transactions.</li>
 *   <li>Announcing pending transactions that didn't get into the chain yet to our peers.</li>
 *   <li>Set the fast catchup time using {@link PeerGroup#setFastCatchupTimeSecs(long)}, to optimize chain
 *       download.</li>
 * </ol>
 *
 * <p>Note that this should be done before chain download commences because if you add a wallet with keys earlier
 * than the current chain head, the relevant parts of the chain won't be redownloaded for you.</p>
 *
 * <p>The Wallet will have an event listener registered on it, so to avoid leaks remember to use
 * {@link PeerGroup#removeWallet(Wallet)} on it if you wish to keep the Wallet but lose the PeerGroup.</p>
 */
public void addWallet(Wallet wallet) {
    lock.lock();
    try {
        Objects.requireNonNull(wallet);
        checkState(!wallets.contains(wallet));
        wallets.add(wallet);
        // The wallet broadcasts pending transactions via this group...
        wallet.setTransactionBroadcaster(this);
        // ...and we listen for wallet changes so the Bloom filter can be kept current.
        wallet.addCoinsReceivedEventListener(Threading.SAME_THREAD, walletCoinsReceivedEventListener);
        wallet.addCoinsSentEventListener(Threading.SAME_THREAD, walletCoinsSentEventListener);
        wallet.addKeyChainEventListener(Threading.SAME_THREAD, walletKeyEventListener);
        wallet.addScriptsChangeEventListener(Threading.SAME_THREAD, walletScriptsEventListener);
        addPeerFilterProvider(wallet);
        // Existing connections must also be told about the new wallet.
        peers.forEach(peer -> peer.addWallet(wallet));
    } finally {
        lock.unlock();
    }
}
/**
 * <p>Link the given PeerFilterProvider to this PeerGroup. DO NOT use this for Wallets, use
 * {@link PeerGroup#addWallet(Wallet)} instead.</p>
 *
 * <p>Note that this should be done before chain download commences because if you add a listener with keys earlier
 * than the current chain head, the relevant parts of the chain won't be redownloaded for you.</p>
 *
 * <p>This method invokes {@link PeerGroup#recalculateFastCatchupAndFilter(FilterRecalculateMode)}.
 * The return value of this method is the {@code ListenableCompletableFuture} returned by that invocation.</p>
 *
 * @return a future that completes once each {@code Peer} in this group has had its
 *         {@code BloomFilter} (re)set.
 */
public ListenableCompletableFuture<BloomFilter> addPeerFilterProvider(PeerFilterProvider provider) {
    lock.lock();
    try {
        Objects.requireNonNull(provider);
        checkState(!peerFilterProviders.contains(provider));
        // Insert the provider at the HEAD of the list, not the tail. All providers must be in a
        // consistent, unchanging state while the filter is built; they guarantee that by locking in
        // their begin method. Appending would create the lock ordering a > b > c in insertion order,
        // and since the main wallet is usually added first that would mean other providers could not
        // call back into the wallet. Prepending preserves the expected ordering for providers the API
        // user installs later. The same reasoning applies to providers that use synchronous RPCs into
        // an actor instead of locking.
        peerFilterProviders.add(0, provider);
        // Don't bother downloading block bodies before the oldest keys in all our wallets, and make
        // sure we recalculate whenever a key is added. Of course, by then we may have downloaded the
        // chain already; ideally adding keys would transparently rewind and redownload, but we are a
        // long way from that yet.
        ListenableCompletableFuture<BloomFilter> result =
                recalculateFastCatchupAndFilter(FilterRecalculateMode.SEND_IF_CHANGED);
        updateVersionMessageRelayTxesBeforeFilter(getVersionMessage());
        return result;
    } finally {
        lock.unlock();
    }
}
/**
 * Opposite of {@link #addPeerFilterProvider(PeerFilterProvider)}. Again, don't use this for wallets. Does not
 * trigger recalculation of the filter.
 */
public void removePeerFilterProvider(PeerFilterProvider provider) {
    lock.lock();
    try {
        boolean removed = peerFilterProviders.remove(Objects.requireNonNull(provider));
        checkArgument(removed);
    } finally {
        lock.unlock();
    }
}
/**
 * Unlinks the given wallet so it no longer receives broadcast transactions or has its transactions announced.
 */
public void removeWallet(Wallet wallet) {
    Objects.requireNonNull(wallet);
    wallets.remove(wallet);
    peerFilterProviders.remove(wallet);
    // Undo every registration made by addWallet, in the same order.
    wallet.removeCoinsReceivedEventListener(walletCoinsReceivedEventListener);
    wallet.removeCoinsSentEventListener(walletCoinsSentEventListener);
    wallet.removeKeyChainEventListener(walletKeyEventListener);
    wallet.removeScriptsChangeEventListener(walletScriptsEventListener);
    wallet.setTransactionBroadcaster(null);
    peers.forEach(peer -> peer.removeWallet(wallet));
}
/** Controls whether a recalculated Bloom filter is transmitted to connected peers. */
public enum FilterRecalculateMode {
SEND_IF_CHANGED,         // send only if the recalculated filter differs from the previous one
FORCE_SEND_FOR_REFRESH,  // always send, e.g. to lower the observed false-positive rate
DONT_SEND,               // recalculate locally but never transmit
}
// Deduplicates concurrent recalculation requests per mode; access is guarded by
// synchronized (inFlightRecalculations) in recalculateFastCatchupAndFilter.
private final Map<FilterRecalculateMode, ListenableCompletableFuture<BloomFilter>> inFlightRecalculations = Maps.newHashMap();
/**
 * Recalculates the bloom filter given to peers as well as the timestamp after which full blocks are downloaded
 * (instead of only headers). Note that calls made one after another may return the same future, if the request
 * wasn't processed yet (i.e. calls are deduplicated).
 *
 * @param mode In what situations to send the filter to connected peers.
 * @return a future that completes once the filter has been calculated (note: this does not mean acknowledged by remote peers).
 */
public ListenableCompletableFuture<BloomFilter> recalculateFastCatchupAndFilter(final FilterRecalculateMode mode) {
final ListenableCompletableFuture<BloomFilter> future = new ListenableCompletableFuture<>();
// Deduplicate: if a recalculation for this mode is already queued but not finished, return its future.
synchronized (inFlightRecalculations) {
if (inFlightRecalculations.get(mode) != null)
return inFlightRecalculations.get(mode);
inFlightRecalculations.put(mode, future);
}
Runnable command = new Runnable() {
@Override
public void run() {
try {
go();
} catch (Throwable e) {
log.error("Exception when trying to recalculate Bloom filter", e); // The executor swallows exceptions :(
}
}
public void go() {
// Runs on the executor thread; must never hold the PeerGroup lock here (lock-ordering safety).
checkState(!lock.isHeldByCurrentThread());
// Fully verifying mode doesn't use this optimization (it can't as it needs to see all transactions).
if ((chain != null && chain.shouldVerifyTransactions()) || !vBloomFilteringEnabled)
return;
// We only ever call bloomFilterMerger.calculate on jobQueue, so we cannot be calculating two filters at once.
FilterMerger.Result result = bloomFilterMerger.calculate(Collections.unmodifiableList(peerFilterProviders /* COW */));
boolean send;
switch (mode) {
case SEND_IF_CHANGED:
send = result.changed;
break;
case DONT_SEND:
send = false;
break;
case FORCE_SEND_FOR_REFRESH:
send = true;
break;
default:
throw new UnsupportedOperationException();
}
if (send) {
for (Peer peer : peers /* COW */) {
// Only query the mempool if this recalculation request is not in order to lower the observed FP
// rate. There's no point querying the mempool when doing this because the FP rate can only go
// down, and we will have seen all the relevant txns before: it's pointless to ask for them again.
peer.setBloomFilter(result.filter, mode != FilterRecalculateMode.FORCE_SEND_FOR_REFRESH);
}
// Reset the false positive estimate so that we don't send a flood of filter updates
// if the estimate temporarily overshoots our threshold.
if (chain != null)
chain.resetFalsePositiveEstimate();
}
// Do this last so that bloomFilter is already set when it gets called.
setFastCatchupTime(result.earliestKeyTime);
// Clear the in-flight marker before completing, so callbacks fired by complete() can queue a fresh request.
synchronized (inFlightRecalculations) {
inFlightRecalculations.put(mode, null);
}
future.complete(result.filter);
}
};
try {
executor.execute(command);
} catch (RejectedExecutionException e) {
// Can happen during shutdown.
}
return future;
}
/**
 * <p>Sets the false positive rate of bloom filters given to peers. The default is {@link #DEFAULT_BLOOM_FILTER_FP_RATE}.</p>
 *
 * <p>Be careful regenerating the bloom filter too often, as it decreases anonymity because remote nodes can
 * compare transactions against both the new and old filters to significantly decrease the false positive rate.</p>
 *
 * <p>See the docs for {@link BloomFilter#BloomFilter(int, double, int, BloomFilter.BloomUpdate)} for a brief
 * explanation of anonymity when using bloom filters.</p>
 */
@Deprecated
public void setBloomFilterFalsePositiveRate(double bloomFilterFPRate) {
lock.lock();
try {
bloomFilterMerger.setBloomFilterFPRate(bloomFilterFPRate);
// Push the new rate out to peers immediately (only if the resulting filter actually changed).
recalculateFastCatchupAndFilter(FilterRecalculateMode.SEND_IF_CHANGED);
} finally {
lock.unlock();
}
}
/**
 * Returns the number of currently connected peers. To be informed when this count changes, use
 * {@link PeerConnectedEventListener#onPeerConnected} and {@link PeerDisconnectedEventListener#onPeerDisconnected}.
 */
public int numConnectedPeers() {
// 'peers' is a copy-on-write list (see the "/* COW */" iterations elsewhere), so no lock is taken here.
return peers.size();
}
/**
 * Connect to a peer by creating a channel to the destination address. This should not be
 * used normally - let the PeerGroup manage connections through {@link #start()}
 *
 * @param address destination IP and port.
 * @return The newly created Peer object or null if the peer could not be connected.
 *         Use {@link Peer#getConnectionOpenFuture()} if you
 *         want a future which completes when the connection is open.
 */
@Nullable
public Peer connectTo(InetSocketAddress address) {
    lock.lock();
    try {
        PeerAddress target = PeerAddress.simple(address);
        // Give the new address a fresh backoff tracker before attempting the connection.
        backoffMap.put(target, new ExponentialBackoff(peerBackoffParams));
        return connectTo(target, true, vConnectTimeout);
    } finally {
        lock.unlock();
    }
}
/**
 * Helper for forcing a connection to localhost. Useful when using regtest mode. Returns the peer object.
 */
@Nullable
public Peer connectToLocalHost() {
    lock.lock();
    try {
        PeerAddress loopback = PeerAddress.localhost(params);
        // Fresh backoff tracker for the loopback address before connecting.
        backoffMap.put(loopback, new ExponentialBackoff(peerBackoffParams));
        return connectTo(loopback, true, vConnectTimeout);
    } finally {
        lock.unlock();
    }
}
/**
 * Creates a version message to send, constructs a Peer object and attempts to connect it. Returns the peer on
 * success or null on failure.
 * @param address Remote network address
 * @param incrementMaxConnections Whether to consider this connection an attempt to fill our quota, or something
 * explicitly requested.
 * @param connectTimeout timeout for establishing the connection to peers
 * @return Peer or null.
 */
@Nullable @GuardedBy("lock")
protected Peer connectTo(PeerAddress address, boolean incrementMaxConnections, Duration connectTimeout) {
checkState(lock.isHeldByCurrentThread());
VersionMessage ver = getVersionMessage().duplicate();
ver.bestHeight = chain == null ? 0 : chain.getBestChainHeight();
// Wire-format timestamps have second precision, so truncate before sending.
ver.time = TimeUtils.currentTime().truncatedTo(ChronoUnit.SECONDS);
ver.receivingAddr = new InetSocketAddress(address.getAddr(), address.getPort());
Peer peer = createPeer(address, ver);
// The startup listener moves the peer from pending to connected (or handles its death).
peer.addConnectedEventListener(Threading.SAME_THREAD, startupListener);
peer.addDisconnectedEventListener(Threading.SAME_THREAD, startupListener);
peer.setMinProtocolVersion(vMinRequiredProtocolVersion);
pendingPeers.add(peer);
try {
log.info("Attempting connection to {} ({} connected, {} pending, {} max)", address,
peers.size(), pendingPeers.size(), maxConnections);
// If the connection failed synchronously, surface the error now; otherwise the attempt
// proceeds asynchronously and handleNewPeer/handlePeerDeath fire later on a worker thread.
CompletableFuture<SocketAddress> future = channels.openConnection(address.toSocketAddress(), peer);
if (future.isDone())
InternalUtils.getUninterruptibly(future);
} catch (ExecutionException e) {
Throwable cause = Throwables.getRootCause(e);
log.warn("Failed to connect to " + address + ": " + cause.getMessage());
handlePeerDeath(peer, cause);
return null;
}
peer.setSocketTimeout(connectTimeout);
// When the channel has connected and version negotiated successfully, handleNewPeer will end up being called on
// a worker thread.
if (incrementMaxConnections) {
// We don't use setMaxConnections here as that would trigger a recursive attempt to establish a new
// outbound connection.
maxConnections++;
}
return peer;
}
/** You can override this to customise the creation of {@link Peer} objects. Called with {@code lock} held. */
@GuardedBy("lock")
protected Peer createPeer(PeerAddress address, VersionMessage ver) {
return new Peer(params, ver, address, chain, requiredServices, downloadTxDependencyDepth);
}
/**
 * Sets the timeout between when a connection attempt to a peer begins and when the version message exchange
 * completes. This does not apply to currently pending peers.
 * @param connectTimeout timeout for establishing the connection to peers
 */
public void setConnectTimeout(Duration connectTimeout) {
this.vConnectTimeout = connectTimeout;
}
/** @deprecated use {@link #setConnectTimeout(Duration)} */
@Deprecated
public void setConnectTimeoutMillis(int connectTimeoutMillis) {
setConnectTimeout(Duration.ofMillis(connectTimeoutMillis));
}
/**
 * <p>Start downloading the blockchain.</p>
 *
 * <p>If no peers are currently connected, the download will be started once a peer starts. If the peer dies,
 * the download will resume with another peer.</p>
 *
 * @param listener a listener for chain download events; passing null clears the current listener
 */
public void startBlockChainDownload(BlockchainDownloadEventListener listener) {
    lock.lock();
    try {
        Peer currentDownloadPeer = downloadPeer;
        if (currentDownloadPeer != null) {
            // Swap the data-event registration from the old listener to the new one on the download peer.
            BlockchainDownloadEventListener previous = this.downloadListener;
            if (previous != null)
                removeDataEventListenerFromPeer(currentDownloadPeer, previous);
            if (listener != null)
                addDataEventListenerToPeer(Threading.USER_THREAD, currentDownloadPeer, listener);
        }
        this.downloadListener = listener;
    } finally {
        lock.unlock();
    }
}
/**
 * Register a data event listener against a single peer (i.e. for blockchain
 * download). Handling registration/deregistration on peer death/add is
 * outside the scope of these methods.
 */
private static void addDataEventListenerToPeer(Executor executor, Peer peer, BlockchainDownloadEventListener downloadListener) {
// The same listener receives both block-downloaded and download-started events.
peer.addBlocksDownloadedEventListener(executor, downloadListener);
peer.addChainDownloadStartedEventListener(executor, downloadListener);
}
/**
 * Remove a registered data event listener against a single peer (i.e. for
 * blockchain download). Handling registration/deregistration on peer death/add is
 * outside the scope of these methods.
 */
private static void removeDataEventListenerFromPeer(Peer peer, BlockchainDownloadEventListener listener) {
// Mirrors addDataEventListenerToPeer: both registrations are undone.
peer.removeBlocksDownloadedEventListener(listener);
peer.removeChainDownloadStartedEventListener(listener);
}
/**
 * Download the blockchain from peers. Convenience that uses a {@link DownloadProgressTracker} for you.<p>
 *
 * This method waits until the download is complete. "Complete" is defined as downloading
 * from at least one peer all the blocks that are in that peer's inventory.
 *
 * @throws RuntimeException wrapping an {@link InterruptedException} if the wait is interrupted;
 *         the thread's interrupt status is restored before throwing.
 */
public void downloadBlockChain() {
    DownloadProgressTracker listener = new DownloadProgressTracker();
    startBlockChainDownload(listener);
    try {
        listener.await();
    } catch (InterruptedException e) {
        // Fix: the original swallowed the interrupt status; restore it so callers further up
        // the stack can observe the interruption.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}
// Called on a worker thread once a freshly connected peer completes its version handshake.
// Moves the peer from pendingPeers to peers, wires up all registered listeners, possibly
// promotes it to download peer, and finally notifies PeerConnectedEventListeners.
protected void handleNewPeer(final Peer peer) {
int newSize = -1;
lock.lock();
try {
// Successful connection: reset both the group-wide and the per-address backoff.
groupBackoff.trackSuccess();
backoffMap.get(peer.getAddress()).trackSuccess();
// Sets up the newly connected peer so it can do everything it needs to.
pendingPeers.remove(peer);
peers.add(peer);
newSize = peers.size();
log.info("{}: New peer ({} connected, {} pending, {} max)", peer, newSize, pendingPeers.size(), maxConnections);
// Give the peer a filter that can be used to probabilistically drop transactions that
// aren't relevant to our wallet. We may still receive some false positives, which is
// OK because it helps improve wallet privacy. Old nodes will just ignore the message.
if (bloomFilterMerger.getLastFilter() != null) peer.setBloomFilter(bloomFilterMerger.getLastFilter());
peer.setDownloadData(false);
// TODO: The peer should calculate the fast catchup time from the added wallets here.
for (Wallet wallet : wallets)
peer.addWallet(wallet);
// Once more than half the connection slots are filled, pick a download peer if we lack one.
if (downloadPeer == null && newSize > maxConnections / 2) {
Peer newDownloadPeer = selectDownloadPeer(peers);
if (newDownloadPeer != null) {
setDownloadPeer(newDownloadPeer);
// Kick off chain download if we aren't already doing it.
boolean shouldDownloadChain = downloadListener != null && chain != null;
if (shouldDownloadChain) {
startBlockChainDownloadFromPeer(downloadPeer);
}
} else {
log.info("Not yet setting download peer because there is no clear candidate.");
}
}
// Make sure the peer knows how to upload transactions that are requested from us.
peer.addBlocksDownloadedEventListener(Threading.SAME_THREAD, peerListener);
peer.addGetDataEventListener(Threading.SAME_THREAD, peerListener);
// Discover other peers.
peer.addAddressEventListener(Threading.SAME_THREAD, peerListener);
// And set up event listeners for clients. This will allow them to find out about new transactions and blocks.
for (ListenerRegistration<BlocksDownloadedEventListener> registration : peersBlocksDownloadedEventListeners)
peer.addBlocksDownloadedEventListener(registration.executor, registration.listener);
for (ListenerRegistration<ChainDownloadStartedEventListener> registration : peersChainDownloadStartedEventListeners)
peer.addChainDownloadStartedEventListener(registration.executor, registration.listener);
for (ListenerRegistration<PeerConnectedEventListener> registration : peerConnectedEventListeners)
peer.addConnectedEventListener(registration.executor, registration.listener);
// We intentionally do not add disconnect listeners to peers
for (ListenerRegistration<GetDataEventListener> registration : peerGetDataEventListeners)
peer.addGetDataEventListener(registration.executor, registration.listener);
for (ListenerRegistration<OnTransactionBroadcastListener> registration : peersTransactionBroadastEventListeners)
peer.addOnTransactionBroadcastListener(registration.executor, registration.listener);
for (ListenerRegistration<PreMessageReceivedEventListener> registration : peersPreMessageReceivedEventListeners)
peer.addPreMessageReceivedEventListener(registration.executor, registration.listener);
} finally {
lock.unlock();
}
// Fire connected callbacks outside the lock to avoid lock-ordering problems with user code.
final int fNewSize = newSize;
for (final ListenerRegistration<PeerConnectedEventListener> registration : peerConnectedEventListeners) {
registration.executor.execute(() -> registration.listener.onPeerConnected(peer, fNewSize));
}
// Discover more peers via the P2P getaddr mechanism, if enabled.
if (vDiscoverPeersViaP2P)
peer.sendMessage(new GetAddrMessage());
}
// Handle to the periodic ping task when pinging is enabled; null otherwise.
@Nullable private volatile ScheduledFuture<?> vPingTask;
// Schedules a repeating task that pings every connected peer. The interval is re-read each tick
// so that disabling pinging at runtime (interval <= 0) cancels the task lazily.
@SuppressWarnings("NonAtomicOperationOnVolatileField")
private void setupPinging() {
if (getPingIntervalMsec() <= 0)
return; // Disabled.
vPingTask = executor.scheduleAtFixedRate(() -> {
try {
if (getPingIntervalMsec() <= 0) {
// Interval was set to <= 0 after scheduling: cancel ourselves on this tick.
ScheduledFuture<?> task = vPingTask;
if (task != null) {
task.cancel(false);
vPingTask = null;
}
return; // Disabled.
}
for (Peer peer : getConnectedPeers()) {
peer.sendPing();
}
} catch (Throwable e) {
log.error("Exception in ping loop", e); // The executor swallows exceptions :(
}
}, getPingIntervalMsec(), getPingIntervalMsec(), TimeUnit.MILLISECONDS);
}
// Switches the designated download peer: unregisters the download listener and disables data
// download on the old peer, then enables both on the new one. Pass null to clear. No-op when the
// peer is already the download peer.
private void setDownloadPeer(@Nullable Peer peer) {
lock.lock();
try {
if (downloadPeer == peer)
return;
if (downloadPeer != null) {
log.info("Unsetting download peer: {}", downloadPeer);
if (downloadListener != null) {
removeDataEventListenerFromPeer(downloadPeer, downloadListener);
}
downloadPeer.setDownloadData(false);
}
downloadPeer = peer;
if (downloadPeer != null) {
log.info("Setting download peer: {}", downloadPeer);
if (downloadListener != null) {
addDataEventListenerToPeer(Threading.SAME_THREAD, peer, downloadListener);
}
downloadPeer.setDownloadData(true);
// Pass on whether Bloom filtering is active plus the catchup time, so the peer can
// request filtered blocks / headers-only as appropriate.
if (chain != null)
downloadPeer.setFastDownloadParameters(bloomFilterMerger.getLastFilter() != null, fastCatchupTime);
}
} finally {
lock.unlock();
}
}
/** @deprecated use {@code Context.get().getConfidenceTable()} instead */
@Deprecated @Nullable
public TxConfidenceTable getMemoryPool() {
return Context.get().getConfidenceTable();
}
/**
 * Tells the {@link PeerGroup} to download only block headers before a certain time and bodies after that. Call this
 * before starting block chain download.
 * Do not use a {@code time > NOW - 1} block, as it will break some block download logic.
 */
public void setFastCatchupTime(Instant fastCatchupTime) {
    lock.lock();
    try {
        // Fast catchup skips transaction bodies, which a fully verifying chain must see.
        checkState(chain == null || !chain.shouldVerifyTransactions(), () ->
                "fast catchup is incompatible with fully verifying");
        this.fastCatchupTime = fastCatchupTime;
        Peer download = downloadPeer;
        if (download != null)
            download.setFastDownloadParameters(bloomFilterMerger.getLastFilter() != null, fastCatchupTime);
    } finally {
        lock.unlock();
    }
}
/** @deprecated use {@link #setFastCatchupTime(Instant)} */
@Deprecated
public void setFastCatchupTimeSecs(long fastCatchupTimeSecs) {
setFastCatchupTime(Instant.ofEpochSecond(fastCatchupTimeSecs));
}
/**
 * Returns the current fast catchup time. The contents of blocks before this time won't be downloaded as they
 * cannot contain any interesting transactions. If you use {@link PeerGroup#addWallet(Wallet)} this just returns
 * the min of the wallets earliest key times.
 * @return the fast catchup time as an {@link Instant}
 */
public Instant getFastCatchupTime() {
lock.lock();
try {
return fastCatchupTime;
} finally {
lock.unlock();
}
}
/** @deprecated use {@link #getFastCatchupTime()} */
@Deprecated
public long getFastCatchupTimeSecs() {
return getFastCatchupTime().getEpochSecond();
}
// Called (on a worker thread) when a peer disconnects or a connection attempt fails. Removes the
// peer from all tracking structures, picks a replacement download peer if needed, updates backoff
// state, requeues the address, and tears down every listener that was registered on the peer.
protected void handlePeerDeath(final Peer peer, @Nullable Throwable exception) {
// Peer deaths can occur during startup if a connect attempt after peer discovery aborts immediately.
if (!isRunning()) return;
int numPeers;
int numConnectedPeers = 0;
lock.lock();
try {
pendingPeers.remove(peer);
peers.remove(peer);
PeerAddress address = peer.getAddress();
log.info("{}: Peer died ({} connected, {} pending, {} max)", address, peers.size(), pendingPeers.size(), maxConnections);
if (peer == downloadPeer) {
log.info("Download peer died. Picking a new one.");
setDownloadPeer(null);
// Pick a new one and possibly tell it to download the chain.
final Peer newDownloadPeer = selectDownloadPeer(peers);
if (newDownloadPeer != null) {
setDownloadPeer(newDownloadPeer);
if (downloadListener != null) {
startBlockChainDownloadFromPeer(newDownloadPeer);
}
}
}
numPeers = peers.size() + pendingPeers.size();
numConnectedPeers = peers.size();
groupBackoff.trackFailure();
if (exception instanceof NoRouteToHostException) {
// A routing failure on an IPv6 address implies the host has no IPv6 connectivity at all,
// so stop trying IPv6 addresses entirely rather than penalising this one address.
if (address.getAddr() instanceof Inet6Address && !ipv6Unreachable) {
ipv6Unreachable = true;
log.warn("IPv6 peer connect failed due to routing failure, ignoring IPv6 addresses from now on");
}
} else {
backoffMap.get(address).trackFailure();
// Put back on inactive list
inactives.offer(address);
}
if (numPeers < getMaxConnections()) {
triggerConnections();
}
} finally {
lock.unlock();
}
// Listener teardown happens outside the lock to avoid calling alien code while holding it.
peer.removeAddressEventListener(peerListener);
peer.removeBlocksDownloadedEventListener(peerListener);
peer.removeGetDataEventListener(peerListener);
for (Wallet wallet : wallets) {
peer.removeWallet(wallet);
}
final int fNumConnectedPeers = numConnectedPeers;
for (ListenerRegistration<BlocksDownloadedEventListener> registration: peersBlocksDownloadedEventListeners)
peer.removeBlocksDownloadedEventListener(registration.listener);
for (ListenerRegistration<ChainDownloadStartedEventListener> registration: peersChainDownloadStartedEventListeners)
peer.removeChainDownloadStartedEventListener(registration.listener);
for (ListenerRegistration<GetDataEventListener> registration: peerGetDataEventListeners)
peer.removeGetDataEventListener(registration.listener);
for (ListenerRegistration<PreMessageReceivedEventListener> registration: peersPreMessageReceivedEventListeners)
peer.removePreMessageReceivedEventListener(registration.listener);
for (ListenerRegistration<OnTransactionBroadcastListener> registration : peersTransactionBroadastEventListeners)
peer.removeOnTransactionBroadcastListener(registration.listener);
// Notify disconnect listeners, then unregister them from the dead peer.
for (final ListenerRegistration<PeerDisconnectedEventListener> registration : peerDisconnectedEventListeners) {
registration.executor.execute(() -> registration.listener.onPeerDisconnected(peer, fNumConnectedPeers));
peer.removeDisconnectedEventListener(registration.listener);
}
}
// Seconds the download speed must stay below the threshold before a peer counts as stalled.
@GuardedBy("lock") private int stallPeriodSeconds = 10;
// Minimum acceptable download speed: bandwidth for 10 block headers per second.
@GuardedBy("lock") private int stallMinSpeedBytesSec = Block.HEADER_SIZE * 10;
/**
 * Configures the stall speed: the speed at which a peer is considered to be serving us the block chain
 * unacceptably slowly. Once a peer has served us data slower than the given data rate for the given
 * number of seconds, it is considered stalled and will be disconnected, forcing the chain download to continue
 * from a different peer. The defaults are chosen conservatively, but if you are running on a platform that is
 * CPU constrained or on a very slow network e.g. EDGE, the default settings may need adjustment to
 * avoid false stalls.
 *
 * @param periodSecs How many seconds the download speed must be below blocksPerSec, defaults to 10.
 * @param bytesPerSecond Download speed (only blocks/txns count) must be consistently below this for a stall, defaults to the bandwidth required for 10 block headers per second.
 */
public void setStallThreshold(int periodSecs, int bytesPerSecond) {
lock.lock();
try {
stallPeriodSeconds = periodSecs;
stallMinSpeedBytesSec = bytesPerSecond;
} finally {
lock.unlock();
}
}
// Runs once per second during chain download: accumulates per-second block/tx/byte counters (via
// onBlocksDownloaded), maintains a moving average of bytes/sec over the configured stall period, and
// disconnects the download peer when the average stays below the stall threshold.
private class ChainDownloadSpeedCalculator implements BlocksDownloadedEventListener, Runnable {
// Per-second counters, reset at the end of every calculate() tick.
private int blocksInLastSecond, txnsInLastSecond, origTxnsInLastSecond;
private long bytesInLastSecond;
// If we take more stalls than this, we assume we're on some kind of terminally slow network and the
// stall threshold just isn't set properly. We give up on stall disconnects after that.
private int maxStalls = 3;
// How many seconds the peer has until we start measuring its speed.
private int warmupSeconds = -1;
// Used to calculate a moving average.
private long[] samples;
private int cursor;
private boolean syncDone;
private final Logger log = LoggerFactory.getLogger(ChainDownloadSpeedCalculator.class);
@Override
public synchronized void onBlocksDownloaded(Peer peer, Block block, @Nullable FilteredBlock filteredBlock, int blocksLeft) {
blocksInLastSecond++;
bytesInLastSecond += Block.HEADER_SIZE;
List<Transaction> blockTransactions = block.getTransactions();
// This whole area of the type hierarchy is a mess.
int txCount = (blockTransactions != null ? countAndMeasureSize(blockTransactions) : 0) +
(filteredBlock != null ? countAndMeasureSize(filteredBlock.getAssociatedTransactions().values()) : 0);
txnsInLastSecond = txnsInLastSecond + txCount;
if (filteredBlock != null)
origTxnsInLastSecond += filteredBlock.getTransactionCount();
}
// Adds the serialized size of each transaction to the byte counter and returns how many there were.
private int countAndMeasureSize(Collection<Transaction> transactions) {
for (Transaction transaction : transactions)
bytesInLastSecond += transaction.messageSize();
return transactions.size();
}
@Override
public void run() {
try {
calculate();
} catch (Throwable e) {
log.error("Error in speed calculator", e);
}
}
// The once-per-second tick: updates the moving average and applies the stall policy.
private void calculate() {
int minSpeedBytesPerSec;
int period;
// Snapshot the thresholds under the PeerGroup lock; the rest runs under 'this'.
lock.lock();
try {
minSpeedBytesPerSec = stallMinSpeedBytesSec;
period = stallPeriodSeconds;
} finally {
lock.unlock();
}
synchronized (this) {
// (Re)allocate the sample window whenever the configured period changes.
if (samples == null || samples.length != period) {
samples = new long[period];
// *2 because otherwise a single low sample could cause an immediate disconnect which is too harsh.
Arrays.fill(samples, minSpeedBytesPerSec * 2);
warmupSeconds = 15;
}
int chainHeight = chain != null ? chain.getBestChainHeight() : -1;
int mostCommonChainHeight = getMostCommonChainHeight();
if (!syncDone && mostCommonChainHeight > 0 && chainHeight >= mostCommonChainHeight) {
log.info("End of sync detected at height {}.", chainHeight);
syncDone = true;
}
if (!syncDone) {
// Calculate the moving average.
samples[cursor++] = bytesInLastSecond;
if (cursor == samples.length) cursor = 0;
long sampleSum = 0;
for (long sample : samples) sampleSum += sample;
final float average = (float) sampleSum / samples.length;
String statsString = String.format(Locale.US,
"%d blocks/sec, %d tx/sec, %d pre-filtered tx/sec, avg/last %.2f/%.2f kilobytes per sec, chain/common height %d/%d",
blocksInLastSecond, txnsInLastSecond, origTxnsInLastSecond, average / 1024.0,
bytesInLastSecond / 1024.0, chainHeight, mostCommonChainHeight);
String thresholdString = String.format(Locale.US, "(threshold <%.2f KB/sec for %d seconds)",
minSpeedBytesPerSec / 1024.0, samples.length);
if (maxStalls <= 0) {
log.info(statsString + ", stall disabled " + thresholdString);
} else if (warmupSeconds > 0) {
// Still in the grace period: don't judge the peer's speed yet.
warmupSeconds--;
if (bytesInLastSecond > 0)
log.info(statsString
+ String.format(Locale.US, " (warming up %d more seconds)", warmupSeconds));
} else if (average < minSpeedBytesPerSec) {
log.info(statsString + ", STALLED " + thresholdString);
maxStalls--;
if (maxStalls == 0) {
// We could consider starting to drop the Bloom filtering FP rate at this point, because
// we tried a bunch of peers and no matter what we don't seem to be able to go any faster.
// This implies we're bandwidth bottlenecked and might want to start using bandwidth
// more effectively. Of course if there's a MITM that is deliberately throttling us,
// this is a good way to make us take away all the FPs from our Bloom filters ... but
// as they don't give us a whole lot of privacy either way that's not inherently a big
// deal.
log.warn("This network seems to be slower than the requested stall threshold - won't do stall disconnects any more.");
} else {
Peer peer = getDownloadPeer();
log.warn(String.format(Locale.US,
"Chain download stalled: received %.2f KB/sec for %d seconds, require average of %.2f KB/sec, disconnecting %s, %d stalls left",
average / 1024.0, samples.length, minSpeedBytesPerSec / 1024.0, peer, maxStalls));
peer.close();
// Reset the sample buffer and give the next peer time to get going.
samples = null;
warmupSeconds = period;
}
} else {
log.info(statsString + ", not stalled " + thresholdString);
}
}
// Reset the per-second counters for the next tick.
blocksInLastSecond = 0;
txnsInLastSecond = 0;
origTxnsInLastSecond = 0;
bytesInLastSecond = 0;
}
}
}
// Lazily created on first chain download; shared across download peers.
@Nullable private ChainDownloadSpeedCalculator chainDownloadSpeedCalculator;
// For testing only: makes 'peer' the download peer, attaches the speed calculator, and starts the download.
void startBlockChainDownloadFromPeer(Peer peer) {
lock.lock();
try {
setDownloadPeer(peer);
if (chainDownloadSpeedCalculator == null) {
// Every second, run the calculator which will log how fast we are downloading the chain.
chainDownloadSpeedCalculator = new ChainDownloadSpeedCalculator();
executor.scheduleAtFixedRate(chainDownloadSpeedCalculator, 1, 1, TimeUnit.SECONDS);
}
peer.addBlocksDownloadedEventListener(Threading.SAME_THREAD, chainDownloadSpeedCalculator);
// startBlockChainDownload will setDownloadData(true) on itself automatically.
peer.startBlockChainDownload();
} finally {
lock.unlock();
}
}
/**
 * Returns a future that is triggered when the number of connected peers reaches at least the given number of
 * peers. By using this with {@link PeerGroup#getMaxConnections()} you can wait until the
 * network is fully online. To block immediately, just call get() on the result. Just calls
 * {@link #waitForPeersOfVersion(int, long)} with zero as the protocol version, so every connected peer counts.
 *
 * @param numPeers How many peers to wait for.
 * @return a future that will be triggered when the number of connected peers is greater than or equals numPeers
 */
public ListenableCompletableFuture<List<Peer>> waitForPeers(final int numPeers) {
    return waitForPeersOfVersion(numPeers, 0);
}
/**
 * Returns a future that is triggered when there are at least the requested number of connected peers that support
 * the given protocol version or higher. To block immediately, just call get() on the result.
 *
 * @param numPeers How many peers to wait for.
 * @param protocolVersion The protocol version the awaited peers must implement (or better).
 * @return a future that will be triggered when the number of connected peers implementing protocolVersion or higher is greater than or equals numPeers
 */
public ListenableCompletableFuture<List<Peer>> waitForPeersOfVersion(final int numPeers, final long protocolVersion) {
    // Hold the lock across the check-then-register sequence, mirroring waitForPeersWithServiceMask(),
    // so that a peer connecting between the snapshot and the listener registration cannot be missed.
    lock.lock();
    try {
        List<Peer> foundPeers = findPeersOfAtLeastVersion(protocolVersion);
        if (foundPeers.size() >= numPeers) {
            // Condition already satisfied: return an already-completed future.
            ListenableCompletableFuture<List<Peer>> f = new ListenableCompletableFuture<>();
            f.complete(foundPeers);
            return f;
        }
        final ListenableCompletableFuture<List<Peer>> future = new ListenableCompletableFuture<>();
        addConnectedEventListener(new PeerConnectedEventListener() {
            @Override
            public void onPeerConnected(Peer peer, int peerCount) {
                final List<Peer> peers = findPeersOfAtLeastVersion(protocolVersion);
                if (peers.size() >= numPeers) {
                    future.complete(peers);
                    // One-shot listener: deregister as soon as the requested count is reached.
                    removeConnectedEventListener(this);
                }
            }
        });
        return future;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns a list of all connected peers whose announced client version is {@code protocolVersion}
 * or higher.
 */
public List<Peer> findPeersOfAtLeastVersion(long protocolVersion) {
    lock.lock();
    try {
        ArrayList<Peer> matching = new ArrayList<>(peers.size());
        for (Peer candidate : peers) {
            if (candidate.getPeerVersionMessage().clientVersion >= protocolVersion) {
                matching.add(candidate);
            }
        }
        return matching;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns a future that is triggered when there are at least the requested number of connected peers whose
 * advertised services match the given bit mask. To block immediately, just call get() on the result.
 *
 * @param numPeers How many peers to wait for.
 * @param mask An integer representing a bit mask that will be ANDed with the peers advertised service masks.
 * @return a future that will be triggered when the number of connected peers matching the mask is greater than or equals numPeers
 */
public ListenableCompletableFuture<List<Peer>> waitForPeersWithServiceMask(final int numPeers, final int mask) {
    lock.lock();
    try {
        List<Peer> foundPeers = findPeersWithServiceMask(mask);
        if (foundPeers.size() >= numPeers) {
            // Condition already satisfied: return an already-completed future.
            ListenableCompletableFuture<List<Peer>> f = new ListenableCompletableFuture<>();
            f.complete(foundPeers);
            return f;
        }
        final ListenableCompletableFuture<List<Peer>> future = new ListenableCompletableFuture<>();
        addConnectedEventListener(new PeerConnectedEventListener() {
            @Override
            public void onPeerConnected(Peer peer, int peerCount) {
                final List<Peer> peers = findPeersWithServiceMask(mask);
                if (peers.size() >= numPeers) {
                    future.complete(peers);
                    // One-shot listener: deregister once the requested count is reached.
                    removeConnectedEventListener(this);
                }
            }
        });
        return future;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns a list of connected peers whose advertised local services contain the requested
 * service bit mask.
 */
public List<Peer> findPeersWithServiceMask(int mask) {
    lock.lock();
    try {
        ArrayList<Peer> matching = new ArrayList<>(peers.size());
        for (Peer candidate : peers) {
            if (candidate.getPeerVersionMessage().localServices.has(mask)) {
                matching.add(candidate);
            }
        }
        return matching;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns the number of connections that are required before transactions will be broadcast. If there aren't
 * enough, {@link PeerGroup#broadcastTransaction(Transaction)} will wait until the minimum number is reached so
 * propagation across the network can be observed. If no value has been set using
 * {@link PeerGroup#setMinBroadcastConnections(int)} a default of 80% of whatever
 * {@link PeerGroup#getMaxConnections()} returns is used.
 */
public int getMinBroadcastConnections() {
    lock.lock();
    try {
        if (minBroadcastConnections == 0) {
            int max = getMaxConnections();
            if (max <= 1)
                return max;
            else
                // Reuse the snapshot taken above instead of calling getMaxConnections() a second
                // time, so both branches are derived from the same value.
                return (int) Math.round(max * 0.8);
        }
        return minBroadcastConnections;
    } finally {
        lock.unlock();
    }
}
/**
 * See {@link PeerGroup#getMinBroadcastConnections()}.
 */
public void setMinBroadcastConnections(int value) {
    lock.lock();
    try {
        // 0 means "unset": getMinBroadcastConnections() then derives a default from getMaxConnections().
        minBroadcastConnections = value;
    } finally {
        lock.unlock();
    }
}
/**
 * Calls {@link PeerGroup#broadcastTransaction(Transaction, int, boolean)} with getMinBroadcastConnections() as
 * the number of connections to wait for before commencing broadcast. Also, if the transaction has no broadcast
 * confirmations yet the peers will be dropped after the transaction has been sent.
 */
@Override
public TransactionBroadcast broadcastTransaction(final Transaction tx) {
    // Math.max guards against a zero minimum (getMinBroadcastConnections() can return 0 when
    // maxConnections is 0): we always require at least one connection to broadcast.
    return broadcastTransaction(tx, Math.max(1, getMinBroadcastConnections()), true);
}
/**
 * <p>Given a transaction, sends it un-announced to one peer and then waits for it to be received back from other
 * peers. Once all connected peers have announced the transaction, the future available via the
 * {@link TransactionBroadcast#awaitRelayed()} method will be completed. If anything goes
 * wrong the exception will be thrown when get() is called, or you can receive it via a callback on the
 * {@link ListenableCompletableFuture}. This method returns immediately, so if you want it to block just call get() on the
 * result.</p>
 *
 * <p>Optionally, peers will be dropped after they have been used for broadcasting the transaction and they have
 * no broadcast confirmations yet.</p>
 *
 * <p>Note that if the PeerGroup is limited to only one connection (discovery is not activated) then the future
 * will complete as soon as the transaction was successfully written to that peer.</p>
 *
 * <p>The transaction won't be sent until there are at least minConnections active connections available.
 * A good choice for proportion would be between 0.5 and 0.8 but if you want faster transmission during initial
 * bringup of the peer group you can lower it.</p>
 *
 * <p>The returned {@link TransactionBroadcast} object can be used to get progress feedback,
 * which is calculated by watching the transaction propagate across the network and be announced by peers.</p>
 */
public TransactionBroadcast broadcastTransaction(final Transaction tx, final int minConnections,
                                                 final boolean dropPeersAfterBroadcast) {
    // If we don't have a record of where this tx came from already, set it to be ourselves so Peer doesn't end up
    // redownloading it from the network redundantly.
    if (tx.getConfidence().getSource().equals(TransactionConfidence.Source.UNKNOWN)) {
        log.info("Transaction source unknown, setting to SELF: {}", tx.getTxId());
        tx.getConfidence().setSource(TransactionConfidence.Source.SELF);
    }
    final TransactionBroadcast broadcast = new TransactionBroadcast(this, tx);
    broadcast.setMinConnections(minConnections);
    broadcast.setDropPeersAfterBroadcast(dropPeersAfterBroadcast && tx.getConfidence().numBroadcastPeers() == 0);
    // Send the TX to the wallet once we have a successful broadcast.
    broadcast.awaitRelayed().whenComplete((bcast, throwable) -> {
        // Whether the broadcast succeeded or failed, it is finished: drop our bookkeeping reference.
        // Fixed: on failure 'bcast' is null, so the old code removed null (a no-op) and the
        // TransactionBroadcast stayed in runningBroadcasts forever - a memory leak.
        runningBroadcasts.remove(broadcast);
        if (bcast != null) {
            // OK, now tell the wallet about the transaction. If the wallet created the transaction then
            // it already knows and will ignore this. If it's a transaction we received from
            // somebody else via a side channel and are now broadcasting, this will put it into the
            // wallet now we know it's valid.
            for (Wallet wallet : wallets) {
                // Assumption here is there are no dependencies of the created transaction.
                //
                // We may end up with two threads trying to do this in parallel - the wallet will
                // ignore whichever one loses the race.
                try {
                    wallet.receivePending(bcast.transaction(), null);
                } catch (VerificationException e) {
                    throw new RuntimeException(e); // Cannot fail to verify a tx we created ourselves.
                }
            }
        }
        // else: the broadcast failed (e.g. we got a reject message from a peer); nothing to tell the wallet.
    });
    // Keep a reference to the TransactionBroadcast object. This is important because otherwise, the entire tree
    // of objects we just created would become garbage if the user doesn't hold on to the returned future, and
    // eventually be collected. This in turn could result in the transaction not being committed to the wallet
    // at all.
    runningBroadcasts.add(broadcast);
    broadcast.broadcastOnly();
    return broadcast;
}
/**
 * Returns the period between pings for an individual peer. Setting this lower means more accurate and timely ping
 * times are available via {@link Peer#lastPingInterval()} but it increases load on the
 * remote node. It defaults to {@link PeerGroup#DEFAULT_PING_INTERVAL_MSEC}.
 */
public long getPingIntervalMsec() {
    lock.lock();
    try {
        // Guarded by the group lock; see setPingIntervalMsec(long) for the matching write.
        return pingIntervalMsec;
    } finally {
        lock.unlock();
    }
}
/**
 * Sets the period between pings for an individual peer. Setting this lower means more accurate and timely ping
 * times are available via {@link Peer#lastPingInterval()} but it increases load on the
 * remote node. It defaults to {@link PeerGroup#DEFAULT_PING_INTERVAL_MSEC}.
 * Setting the value to be smaller or equals 0 disables pinging entirely, although you can still request one yourself
 * using {@link Peer#sendPing()}.
 */
public void setPingIntervalMsec(long pingIntervalMsec) {
    lock.lock();
    try {
        this.pingIntervalMsec = pingIntervalMsec;
        // Cancel any currently-scheduled ping task and re-create it so the new interval
        // takes effect immediately rather than after the old interval elapses.
        ScheduledFuture<?> task = vPingTask;
        if (task != null)
            task.cancel(false);
        setupPinging();
    } finally {
        lock.unlock();
    }
}
/**
 * If a peer is connected to that claims to speak a protocol version lower than the given version, it will
 * be disconnected and another one will be tried instead.
 */
public void setMinRequiredProtocolVersion(int minRequiredProtocolVersion) {
    // Written without the lock - the "v" prefix suggests a volatile field, but the declaration
    // is outside this view; confirm before relying on it.
    this.vMinRequiredProtocolVersion = minRequiredProtocolVersion;
}
/** The minimum protocol version required: defaults to the version required for Bloom filtering. */
public int getMinRequiredProtocolVersion() {
    return vMinRequiredProtocolVersion;
}
/**
 * Returns our peers most commonly reported chain height.
 * If the most common heights are tied, or no peers are connected, returns {@code 0}.
 */
public int getMostCommonChainHeight() {
    lock.lock();
    try {
        // Delegates to the static overload while holding the lock that guards the peer list.
        return getMostCommonChainHeight(this.peers);
    } finally {
        lock.unlock();
    }
}
/**
 * Returns the chain height most commonly reported by the given {@link Peer}s.
 * If the most common heights are tied, or the list is empty, returns {@code 0}.
 */
public static int getMostCommonChainHeight(final List<Peer> peers) {
    if (peers.isEmpty())
        return 0;
    List<Integer> reportedHeights = new ArrayList<>(peers.size());
    for (Peer p : peers) {
        reportedHeights.add((int) p.getBestHeight());
    }
    return maxOfMostFreq(reportedHeights);
}
/** A (value, occurrence count) pair that orders by descending count. */
private static class Pair implements Comparable<Pair> {
    final int item;
    int count = 0;
    public Pair(int item) { this.item = item; }
    // note that in this implementation compareTo() is not consistent with equals():
    // ordering is by descending count only, so two pairs with equal counts but different items
    // compare as "equal". That is acceptable because instances are only sorted, never placed
    // in equality- or hash-based collections.
    @Override public int compareTo(Pair o) { return -Integer.compare(count, o.count); }
}
/**
 * Returns the item that occurs most frequently in the given list. If the highest frequency is
 * shared by two or more distinct items (no single winner), or the list is empty, returns {@code 0}.
 */
static int maxOfMostFreq(List<Integer> items) {
    if (items.isEmpty())
        return 0;
    // Count occurrences of each distinct item. Using the standard library here removes the
    // previous dependency on Guava's Ordering plus the double-sort Pair bookkeeping.
    java.util.Map<Integer, Integer> counts = new java.util.HashMap<>();
    for (int item : items)
        counts.merge(item, 1, Integer::sum);
    // Single pass to find the unique item with the highest count; any tie for first place yields 0.
    int bestItem = 0;
    int bestCount = 0;
    boolean tiedForFirst = false;
    for (java.util.Map.Entry<Integer, Integer> entry : counts.entrySet()) {
        int count = entry.getValue();
        if (count > bestCount) {
            bestCount = count;
            bestItem = entry.getKey();
            tiedForFirst = false;
        } else if (count == bestCount) {
            tiedForFirst = true;
        }
    }
    return tiedForFirst ? 0 : bestItem;
}
/**
 * Given a list of Peers, return a Peer to be used as the download peer. If you don't want PeerGroup to manage
 * download peer statuses for you, just override this and always return null.
 */
@Nullable
protected Peer selectDownloadPeer(List<Peer> peers) {
    // Characteristics to select for in order of importance:
    // - Chain height is reasonable (majority of nodes)
    // - High enough protocol version for the features we want (but we'll settle for less)
    // - Randomly, to try and spread the load.
    if (peers.isEmpty())
        return null;
    // Refuse to pick a peer at all when there is no consensus about the block height.
    int consensusHeight = getMostCommonChainHeight(peers);
    if (consensusHeight == 0)
        return null;
    // Keep only peers that announce the minimum protocol and required services and that appear
    // fully synchronized: at the consensus height, or at most one block ahead of it.
    final int minimumVersion = ProtocolVersion.WITNESS_VERSION.intValue();
    List<Peer> candidates = new LinkedList<>();
    int topPriority = Integer.MIN_VALUE;
    for (Peer candidate : peers) {
        final VersionMessage version = candidate.getPeerVersionMessage();
        if (version.clientVersion < minimumVersion
                || !version.services().has(Services.NODE_NETWORK)
                || !version.services().has(Services.NODE_WITNESS))
            continue;
        final long height = candidate.getBestHeight();
        if (height < consensusHeight || height > consensusHeight + 1)
            continue;
        candidates.add(candidate);
        topPriority = Math.max(topPriority, getPriority(candidate.peerAddress));
    }
    if (candidates.isEmpty())
        return null;
    // If there is a difference in priority, consider only the highest.
    final int highest = topPriority;
    candidates.removeIf(candidate -> getPriority(candidate.peerAddress) < highest);
    // Random poll among the remaining candidates to spread the load.
    return candidates.get((int) (Math.random() * candidates.size()));
}
/**
 * Returns the currently selected download peer. Bear in mind that it may have changed as soon as this method
 * returns. Can return null if no peer was selected.
 */
public Peer getDownloadPeer() {
    lock.lock();
    try {
        // The lock only guarantees a consistent read; the selection may change immediately after.
        return downloadPeer;
    } finally {
        lock.unlock();
    }
}
/**
 * Returns the maximum number of {@link Peer}s to discover. This maximum is checked after
 * each {@link PeerDiscovery} so this max number can be surpassed.
 * @return the maximum number of peers to discover
 */
public int getMaxPeersToDiscoverCount() {
    // Read without the lock - the "v" prefix suggests a volatile field; declaration is outside this view.
    return vMaxPeersToDiscoverCount;
}
/**
 * Sets the maximum number of {@link Peer}s to discover. This maximum is checked after
 * each {@link PeerDiscovery} so this max number can be surpassed.
 * @param maxPeersToDiscoverCount the maximum number of peers to discover
 */
public void setMaxPeersToDiscoverCount(int maxPeersToDiscoverCount) {
    this.vMaxPeersToDiscoverCount = maxPeersToDiscoverCount;
}
/** See {@link #setUseLocalhostPeerWhenPossible(boolean)} */
public boolean getUseLocalhostPeerWhenPossible() {
    lock.lock();
    try {
        return useLocalhostPeerWhenPossible;
    } finally {
        lock.unlock();
    }
}
/**
 * When true (the default), PeerGroup will attempt to connect to a Bitcoin node running on localhost before
 * attempting to use the P2P network. If successful, only localhost will be used. This makes for a simple
 * and easy way for a user to upgrade a bitcoinj based app running in SPV mode to fully validating security.
 */
public void setUseLocalhostPeerWhenPossible(boolean useLocalhostPeerWhenPossible) {
    lock.lock();
    try {
        // Guarded by the group lock, matching the getter above.
        this.useLocalhostPeerWhenPossible = useLocalhostPeerWhenPossible;
    } finally {
        lock.unlock();
    }
}
/** Returns whether this peer group has been started and not yet stopped. */
public boolean isRunning() {
    // Read without the lock - the "v" prefix suggests a volatile field; declaration is outside this view.
    return vRunning;
}
/**
 * Can be used to disable Bloom filtering entirely, even in SPV mode. You are very unlikely to need this, it is
 * an optimisation for rare cases when full validation is not required but it's still more efficient to download
 * full blocks than filtered blocks.
 */
public void setBloomFilteringEnabled(boolean bloomFilteringEnabled) {
    this.vBloomFilteringEnabled = bloomFilteringEnabled;
}
/** Returns whether the Bloom filtering protocol optimisation is in use: defaults to true. */
public boolean isBloomFilteringEnabled() {
    return vBloomFilteringEnabled;
}
}
| bitcoinj/bitcoinj | core/src/main/java/org/bitcoinj/core/PeerGroup.java |
1,453 | package org.opentripplanner.standalone.server;
import jakarta.ws.rs.HttpMethod;
import jakarta.ws.rs.container.ContainerRequestContext;
import jakarta.ws.rs.container.ContainerRequestFilter;
import jakarta.ws.rs.container.ContainerResponseContext;
import jakarta.ws.rs.container.ContainerResponseFilter;
import jakarta.ws.rs.core.MultivaluedMap;
import jakarta.ws.rs.core.Response;
import java.io.IOException;
/**
* The Same Origin Policy states that JavaScript code (or other scripts) running on a web page may
* not interact with resources originating from sites with a different hostname, protocol, or port
* number.
* <p>
* We used to use JSONP ("JSON with padding") as a way to get around this. Despite being very
* common, this is of course a big hack to defeat a security policy. Modern browsers respect "Cross
* Origin Resource Sharing" (CORS) headers, so we have switched to that system.
*/
class CorsFilter implements ContainerRequestFilter, ContainerResponseFilter {

    /**
     * CORS request filter. Hijack "preflight" OPTIONS requests before the Jersey resources get them.
     * The response will then pass through the CORS response filter on its way back out.
     */
    @Override
    public void filter(ContainerRequestContext requestContext) throws IOException {
        if (HttpMethod.OPTIONS.equals(requestContext.getMethod())) {
            Response.ResponseBuilder preflightResponse = Response.status(Response.Status.OK);
            if (requestContext.getHeaderString("Access-Control-Request-Headers") != null) {
                // Echo back whatever request headers the client asked permission to use.
                preflightResponse.header(
                    "Access-Control-Allow-Headers",
                    requestContext.getHeaderString("Access-Control-Request-Headers")
                );
            }
            if (requestContext.getHeaderString("Access-Control-Request-Method") != null) {
                // Fixed: the CORS response header defined by the Fetch spec is
                // "Access-Control-Allow-Methods" (plural). The previous singular
                // "Access-Control-Allow-Method" is not a real header, so browsers ignored it
                // and preflights for POST could fail.
                preflightResponse.header("Access-Control-Allow-Methods", "GET,POST");
            }
            // Allow caching of pre-flight options for up to an hour
            preflightResponse.header("Access-Control-Max-Age", "3600");
            requestContext.abortWith(preflightResponse.build());
        }
    }

    /**
     * CORS response filter. Allow requests from anywhere. Just echo back the contents of the Origin
     * header. Allow credentials if the transport layer is secure.
     */
    @Override
    public void filter(ContainerRequestContext request, ContainerResponseContext response)
        throws IOException {
        String origin = request.getHeaderString("Origin"); // case insensitive
        MultivaluedMap<String, Object> headers = response.getHeaders();
        // NOTE(review): echoing an arbitrary Origin while also sending
        // Access-Control-Allow-Credentials lets any site make credentialed requests;
        // confirm this wide-open policy is intentional for this API.
        headers.add("Access-Control-Allow-Origin", origin);
        boolean secureTransport = request.getSecurityContext().isSecure();
        headers.add("Access-Control-Allow-Credentials", secureTransport);
    }
}
| opentripplanner/OpenTripPlanner | src/main/java/org/opentripplanner/standalone/server/CorsFilter.java |
1,454 | package org.jruby.util;
import java.nio.ByteOrder;
import org.jruby.util.unsafe.UnsafeHolder;
import sun.misc.Unsafe;
/**
* SipHash implementation with hand inlining the SIPROUND.
*
* To know details about SipHash, see;
* "a fast short-input PRF" https://www.131002.net/siphash/
*
* @author [email protected]
*/
/**
 * SipHash-2-4 implementation with the SIPROUND permutation hand-inlined.
 *
 * To know details about SipHash, see;
 * "a fast short-input PRF" https://www.131002.net/siphash/
 *
 * @author kosaki.kentaro@gmail.com
 */
public class SipHashInline {

    /** Hashes the whole of {@code data} with the 128-bit key (k0, k1). */
    public static long hash24(long k0, long k1, byte[] data) {
        return hash24(k0, k1, data, 0, data.length);
    }

    /**
     * Hashes {@code length} bytes of {@code src} starting at {@code offset} with the
     * 128-bit key (k0, k1), returning the 64-bit SipHash-2-4 digest.
     *
     * @throws ArrayIndexOutOfBoundsException if the requested range lies outside {@code src}
     */
    public static long hash24(long k0, long k1, byte[] src, int offset, int length) {
        // Initialization vector constants from the SipHash reference implementation
        // ("somepseudorandomlygeneratedbytes"), XORed with the key.
        long v0 = 0x736f6d6570736575L ^ k0;
        long v1 = 0x646f72616e646f6dL ^ k1;
        long v2 = 0x6c7967656e657261L ^ k0;
        long v3 = 0x7465646279746573L ^ k1;
        long m;
        // Index just past the last complete 8-byte block.
        int last = offset + length / 8 * 8;
        int i = offset;

        if (offset < 0) {
            throw new ArrayIndexOutOfBoundsException(offset);
        } else if (offset + length > src.length) {
            throw new ArrayIndexOutOfBoundsException(src.length);
        }
        // processing 8 bytes blocks in data
        while (i < last) {
            m = LongReader.INSTANCE.getLong(src, i);
            i += 8;
            // MSGROUND {
            v3 ^= m;

            /* SIPROUND with hand reordering
             *
             * SIPROUND in siphash24.c:
             *   A: v0 += v1;
             *   B: v1=ROTL(v1,13);
             *   C: v1 ^= v0;
             *   D: v0=ROTL(v0,32);
             *   E: v2 += v3;
             *   F: v3=ROTL(v3,16);
             *   G: v3 ^= v2;
             *   H: v0 += v3;
             *   I: v3=ROTL(v3,21);
             *   J: v3 ^= v0;
             *   K: v2 += v1;
             *   L: v1=ROTL(v1,17);
             *   M: v1 ^= v2;
             *   N: v2=ROTL(v2,32);
             *
             * The statements are interleaved below so the two dependency chains
             * (ABCDHIJ and EFGKLMN) can execute in parallel on superscalar CPUs.
             */
            // SIPROUND {
            v0 += v1;                    v2 += v3;
            v1 = (v1 << 13) | v1 >>> 51; v3 = (v3 << 16) | v3 >>> 48;
            v1 ^= v0;                    v3 ^= v2;
            v0 = (v0 << 32) | v0 >>> 32; v2 += v1;
            v0 += v3;                    v1 = (v1 << 17) | v1 >>> 47;
            v3 = (v3 << 21) | v3 >>> 43; v1 ^= v2;
            v3 ^= v0;                    v2 = (v2 << 32) | v2 >>> 32;
            // }
            // SIPROUND {
            v0 += v1;                    v2 += v3;
            v1 = (v1 << 13) | v1 >>> 51; v3 = (v3 << 16) | v3 >>> 48;
            v1 ^= v0;                    v3 ^= v2;
            v0 = (v0 << 32) | v0 >>> 32; v2 += v1;
            v0 += v3;                    v1 = (v1 << 17) | v1 >>> 47;
            v3 = (v3 << 21) | v3 >>> 43; v1 ^= v2;
            v3 ^= v0;                    v2 = (v2 << 32) | v2 >>> 32;
            // }
            v0 ^= m;
            // }
        }

        // packing the last block to long, as LE 0-7 bytes + the length in the top byte
        m = 0;
        for (i = offset + length - 1; i >= last; --i) {
            m <<= 8;
            // Fixed: mask to a single byte. The previous "(long) src[i]" sign-extended bytes
            // >= 0x80 to 64 bits, and the OR then corrupted every byte already packed into m.
            m |= src[i] & 0xffL;
        }
        m |= (long) length << 56;
        // MSGROUND {
        v3 ^= m;
        for (int j = 0; j < 2; j++) {
            // SIPROUND {
            v0 += v1;                    v2 += v3;
            v1 = (v1 << 13) | v1 >>> 51; v3 = (v3 << 16) | v3 >>> 48;
            v1 ^= v0;                    v3 ^= v2;
            v0 = (v0 << 32) | v0 >>> 32; v2 += v1;
            v0 += v3;                    v1 = (v1 << 17) | v1 >>> 47;
            v3 = (v3 << 21) | v3 >>> 43; v1 ^= v2;
            v3 ^= v0;                    v2 = (v2 << 32) | v2 >>> 32;
            // }
        }
        v0 ^= m;
        // }

        // finishing: 4 rounds after mixing in the constant 0xff, per the SipHash-2-4 spec.
        v2 ^= 0xff;
        for (int j = 0; j < 4; j++) {
            // SIPROUND {
            v0 += v1;                    v2 += v3;
            v1 = (v1 << 13) | v1 >>> 51; v3 = (v3 << 16) | v3 >>> 48;
            v1 ^= v0;                    v3 ^= v2;
            v0 = (v0 << 32) | v0 >>> 32; v2 += v1;
            v0 += v3;                    v1 = (v1 << 17) | v1 >>> 47;
            v3 = (v3 << 21) | v3 >>> 43; v1 ^= v2;
            v3 ^= v0;                    v2 = (v2 << 32) | v2 >>> 32;
            // }
        }
        return v0 ^ v1 ^ v2 ^ v3;
    }

    /** Reads a little-endian long from a byte array; Unsafe-backed when available. */
    private static abstract class LongReader {
        public abstract long getLong(byte[] src, int offset);

        public static final LongReader INSTANCE = createBestLongReader();

        private static LongReader createBestLongReader() {
            try {
                // The Unsafe fast path only produces little-endian reads, so it is used
                // exclusively on little-endian hardware.
                if (UnsafeHolder.U != null) {
                    if (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN) {
                        return new UnsafeLongReader(UnsafeHolder.U);
                    }
                }
            } catch (Exception e) {
                // Fall through to the portable reader when Unsafe is unavailable.
            }
            return new FallbackLongReader();
        }

        /** Portable little-endian reader used when Unsafe is unavailable or the CPU is big-endian. */
        private static final class FallbackLongReader extends LongReader {
            @Override
            public long getLong(byte[] src, int offset) {
                // Fixed: each byte is masked with 0xffL. The previous plain "(long) src[b]" casts
                // sign-extended bytes >= 0x80, and the ORs then corrupted the higher result bytes.
                return (src[offset++] & 0xffL)       |
                       (src[offset++] & 0xffL) << 8  |
                       (src[offset++] & 0xffL) << 16 |
                       (src[offset++] & 0xffL) << 24 |
                       (src[offset++] & 0xffL) << 32 |
                       (src[offset++] & 0xffL) << 40 |
                       (src[offset++] & 0xffL) << 48 |
                       (src[offset++] & 0xffL) << 56 ;
            }
        }

        /** Fast little-endian reader backed by sun.misc.Unsafe. */
        private static final class UnsafeLongReader extends LongReader {
            final Unsafe unsafe;
            final int byteArrayBaseOffset;

            public UnsafeLongReader(Unsafe unsafe) {
                this.unsafe = unsafe;
                this.byteArrayBaseOffset = unsafe.arrayBaseOffset(byte[].class);
            }

            @Override
            public final long getLong(byte[] src, int offset) {
                return unsafe.getLong(src, byteArrayBaseOffset + (long)offset);
            }
        }
    }
}
| jruby/jruby | core/src/main/java/org/jruby/util/SipHashInline.java |
1,455 | /*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.services.resources;
import jakarta.ws.rs.core.Response;
import org.keycloak.TokenVerifier.Predicate;
import org.keycloak.authentication.AuthenticationProcessor;
import org.keycloak.authentication.ExplainedVerificationException;
import org.keycloak.authentication.actiontoken.ActionTokenContext;
import org.keycloak.authentication.actiontoken.ExplainedTokenVerificationException;
import org.keycloak.common.VerificationException;
import org.keycloak.events.Details;
import org.keycloak.events.Errors;
import org.keycloak.events.EventBuilder;
import org.keycloak.models.SingleUseObjectKeyModel;
import org.keycloak.models.ClientModel;
import org.keycloak.models.KeycloakSession;
import org.keycloak.models.RealmModel;
import org.keycloak.models.SingleUseObjectProvider;
import org.keycloak.models.UserModel;
import org.keycloak.models.UserSessionModel;
import org.keycloak.protocol.oidc.utils.RedirectUtils;
import org.keycloak.representations.JsonWebToken;
import org.keycloak.services.ErrorPageException;
import org.keycloak.services.managers.AuthenticationManager;
import org.keycloak.services.managers.AuthenticationManager.AuthResult;
import org.keycloak.services.messages.Messages;
import org.keycloak.sessions.AuthenticationSessionCompoundId;
import org.keycloak.sessions.AuthenticationSessionModel;
import org.keycloak.sessions.CommonClientSessionModel.Action;
import java.util.Objects;
import java.util.function.Consumer;
import org.jboss.logging.Logger;
/**
*
* @author hmlnarik
*/
public class LoginActionsServiceChecks {
private static final Logger LOG = Logger.getLogger(LoginActionsServiceChecks.class.getName());
/**
 * Check that the user ID (subject) carried in the token matches the user authenticated in the
 * current authentication session.
 */
public static class AuthenticationSessionUserIdMatchesOneFromToken implements Predicate<JsonWebToken> {

    private final ActionTokenContext<?> context;

    public AuthenticationSessionUserIdMatchesOneFromToken(ActionTokenContext<?> context) {
        this.context = context;
    }

    @Override
    public boolean test(JsonWebToken t) throws VerificationException {
        AuthenticationSessionModel authSession = context.getAuthenticationSession();

        // The token is valid only when a session exists, a user is authenticated in it,
        // and the token subject identifies that same user.
        boolean subjectMatches = authSession != null
          && authSession.getAuthenticatedUser() != null
          && Objects.equals(t.getSubject(), authSession.getAuthenticatedUser().getId());

        if (!subjectMatches) {
            throw new ExplainedTokenVerificationException(t, Errors.INVALID_TOKEN, Messages.INVALID_USER);
        }

        return true;
    }
}
/**
 * Verifies that if authentication session exists and any action is required according to it, then it is
 * the expected one.
 *
 * If there is an action required in the session, furthermore it is not the expected one, and the required
 * action is redirection to "required actions", it throws with response performing the redirect to required
 * actions.
 */
public static class IsActionRequired implements Predicate<JsonWebToken> {

    private final ActionTokenContext<?> context;

    private final AuthenticationSessionModel.Action expectedAction;

    public IsActionRequired(ActionTokenContext<?> context, Action expectedAction) {
        this.context = context;
        this.expectedAction = expectedAction;
    }

    @Override
    public boolean test(JsonWebToken t) throws VerificationException {
        AuthenticationSessionModel authSession = context.getAuthenticationSession();

        // Passes trivially when there is no session, or when the session's action already is the expected one.
        if (authSession != null && ! Objects.equals(authSession.getAction(), this.expectedAction.name())) {
            // Special case: the session is parked on REQUIRED_ACTIONS. Rather than reporting an
            // invalid code, throw an exception that carries the redirect response sending the user
            // into the required-actions flow (exception used as control flow for the redirect).
            if (Objects.equals(AuthenticationSessionModel.Action.REQUIRED_ACTIONS.name(), authSession.getAction())) {
                throw new LoginActionsServiceException(
                  AuthenticationManager.nextActionAfterAuthentication(context.getSession(), authSession,
                    context.getClientConnection(), context.getRequest(), context.getUriInfo(), context.getEvent()));
            }

            // Any other mismatching action means the token is being used out of order.
            throw new ExplainedTokenVerificationException(t, Errors.INVALID_TOKEN, Messages.INVALID_CODE);
        }

        return true;
    }
}
/**
 * Verifies that the user with the given ID exists and is enabled in the current realm, and that no
 * <em>different</em> user is already authenticated in this browser session. If all checks pass, the user
 * is optionally injected via the given setter (e.g. into session context).
 */
public static void checkIsUserValid(KeycloakSession session, RealmModel realm, String userId, Consumer<UserModel> userSetter, EventBuilder event) throws VerificationException {
    // A null userId is treated the same as an unknown user.
    UserModel user = userId == null ? null : session.users().getUserById(realm, userId);

    if (user == null) {
        throw new ExplainedVerificationException(Errors.USER_NOT_FOUND, Messages.INVALID_USER);
    }

    if (! user.isEnabled()) {
        throw new ExplainedVerificationException(Errors.USER_DISABLED, Messages.ACCOUNT_DISABLED);
    }

    // Check whether some user is already authenticated via the identity cookie in this browser.
    AuthResult authResult = AuthenticationManager.authenticateIdentityCookie(session, realm, true);
    if (authResult != null) {
        UserSessionModel userSession = authResult.getSession();
        if (!user.equals(userSession.getUser())) {
            // do not allow authenticated users performing actions that are bound to other user and fire an event
            // it might be an attempt to hijack a user account or perform actions on behalf of others
            // we don't support yet multiple accounts within a same browser session
            event.detail(Details.EXISTING_USER, userSession.getUser().getId());
            event.error(Errors.DIFFERENT_USER_AUTHENTICATED);
            AuthenticationSessionModel authSession = session.getContext().getAuthenticationSession();
            throw new ErrorPageException(session, authSession, Response.Status.BAD_REQUEST, Messages.DIFFERENT_USER_AUTHENTICATED, userSession.getUser().getUsername());
        }
    }

    if (userSetter != null) {
        userSetter.accept(user);
    }
}
/**
 * Token-based variant of {@link #checkIsUserValid(KeycloakSession, RealmModel, String, Consumer, EventBuilder)}:
 * validates the user referenced by the action token and, on success, marks that user as the
 * authenticated user of the current authentication session. Validation failures are re-wrapped so
 * they carry the offending token.
 */
public static <T extends JsonWebToken & SingleUseObjectKeyModel> void checkIsUserValid(T token, ActionTokenContext<T> context, EventBuilder event) throws VerificationException {
    try {
        checkIsUserValid(context.getSession(), context.getRealm(), token.getUserId(), context.getAuthenticationSession()::setAuthenticatedUser, event);
    } catch (ExplainedVerificationException ex) {
        // Attach the token so callers get token-specific context for logging/handling.
        throw new ExplainedTokenVerificationException(token, ex);
    }
}
/**
 * Verifies that the given client both exists (is non-null) and is enabled; throws an explained
 * verification error otherwise.
 */
public static void checkIsClientValid(KeycloakSession session, ClientModel client) throws VerificationException {
    if (client == null) {
        throw new ExplainedVerificationException(Errors.CLIENT_NOT_FOUND, Messages.UNKNOWN_LOGIN_REQUESTER);
    } else if (!client.isEnabled()) {
        throw new ExplainedVerificationException(Errors.CLIENT_NOT_FOUND, Messages.LOGIN_REQUESTER_NOT_ENABLED);
    }
}
/**
 * Verifies that the client of the current authentication session exists and is enabled, and that it
 * matches the client ID recorded in the token's {@code issuedFor} field when that field is set.
 */
public static <T extends JsonWebToken> void checkIsClientValid(T token, ActionTokenContext<T> context) throws VerificationException {
    String clientId = token.getIssuedFor();
    AuthenticationSessionModel authSession = context.getAuthenticationSession();
    // With no authentication session there is no client, and the delegate below throws on null.
    ClientModel client = authSession == null ? null : authSession.getClient();

    try {
        checkIsClientValid(context.getSession(), client);
        // If the token is bound to a specific client, it must be the session's client.
        if (clientId != null && ! Objects.equals(client.getClientId(), clientId)) {
            throw new ExplainedTokenVerificationException(token, Errors.CLIENT_NOT_FOUND, Messages.UNKNOWN_LOGIN_REQUESTER);
        }
    } catch (ExplainedVerificationException ex) {
        // Re-wrap so the failure carries the offending token.
        throw new ExplainedTokenVerificationException(token, ex);
    }
}
/**
 * Predicate that checks that the redirect URL — when one was supplied — is a valid
 * redirect URI for the client of the current authentication session. A {@code null}
 * redirect URI is accepted without any check.
 */
public static class IsRedirectValid implements Predicate<JsonWebToken> {

    private final ActionTokenContext<?> context;
    private final String redirectUri;

    public IsRedirectValid(ActionTokenContext<?> context, String redirectUri) {
        this.context = context;
        this.redirectUri = redirectUri;
    }

    @Override
    public boolean test(JsonWebToken t) throws VerificationException {
        // No redirect requested — nothing to validate.
        if (redirectUri == null) {
            return true;
        }
        ClientModel client = context.getAuthenticationSession().getClient();
        String verifiedUri = RedirectUtils.verifyRedirectUri(context.getSession(), redirectUri, client);
        if (verifiedUri == null) {
            throw new ExplainedTokenVerificationException(t, Errors.INVALID_REDIRECT_URI, Messages.INVALID_REDIRECT_URI);
        }
        return true;
    }
}
/**
 * This check verifies that the current authentication session is consistent with the one specified in the token.
 * Examples:
 * <ul>
 *     <li>1. Email from administrator with reset e-mail - token does not contain auth session ID</li>
 *     <li>2. Email from "verify e-mail" step within flow - token contains auth session ID.</li>
 *     <li>3. User clicked the link in an e-mail and gets to a new browser - authentication session cookie is not set</li>
 *     <li>4. User clicked the link in an e-mail while having authentication running - authentication session cookie
 *            is already set in the browser</li>
 * </ul>
 *
 * <ul>
 *     <li>For combinations 1 and 3, 1 and 4, and 2 and 3: Requests next step</li>
 *     <li>For combination 2 and 4:
 *         <ul>
 *             <li>If the auth session IDs from token and cookie match, pass</li>
 *             <li>Else if the auth session from cookie was forked and its parent auth session ID
 *                 matches that of token, replaces current auth session with that of parent and passes</li>
 *             <li>Else requests restart (this method returns {@code false}; the caller triggers the restart)</li>
 *         </ul>
 *     </li>
 * </ul>
 *
 * When the check passes, it also sets the authentication session in token context accordingly.
 *
 * @param context action token processing context
 * @param authSessionFromCookie authentication session identified by the browser cookie
 * @param authSessionCompoundIdFromToken compound auth session ID carried in the token, may be {@code null}
 * @return {@code true} when the cookie's session (or the fork parent it came from) matches the token's session
 * @param <T>
 */
public static <T extends JsonWebToken> boolean doesAuthenticationSessionFromCookieMatchOneFromToken(
        ActionTokenContext<T> context, AuthenticationSessionModel authSessionFromCookie, String authSessionCompoundIdFromToken) throws VerificationException {
    // Token carries no auth session ID — nothing to match against (cases 1+3, 1+4).
    if (authSessionCompoundIdFromToken == null) {
        return false;
    }
    // Direct match: the cookie's compound session ID equals the token's (case 2+4, same tab).
    if (Objects.equals(AuthenticationSessionCompoundId.fromAuthSession(authSessionFromCookie).getEncodedId(), authSessionCompoundIdFromToken)) {
        context.setAuthenticationSession(authSessionFromCookie, false);
        return true;
    }
    // Check if it's forked session. It would have same parent (rootSession) as our browser authenticationSession
    String parentTabId = authSessionFromCookie.getAuthNote(AuthenticationProcessor.FORKED_FROM);
    if (parentTabId == null) {
        return false;
    }
    AuthenticationSessionModel authSessionFromParent = authSessionFromCookie.getParentSession().getAuthenticationSession(authSessionFromCookie.getClient(), parentTabId);
    if (authSessionFromParent == null) {
        return false;
    }
    // It's the correct browser. We won't continue login
    // from the login form (browser flow) but from the token's flow
    // Don't expire KC_RESTART cookie at this point
    LOG.debugf("Switched to forked tab: %s from: %s . Root session: %s", authSessionFromParent.getTabId(), authSessionFromCookie.getTabId(), authSessionFromCookie.getParentSession().getId());
    context.setAuthenticationSession(authSessionFromParent, false);
    // Resume from the execution the forked parent tab last processed.
    context.setExecutionId(authSessionFromParent.getAuthNote(AuthenticationProcessor.LAST_PROCESSED_EXECUTION));
    return true;
}
/**
 * Ensures the single-use action token has not been consumed already; a token whose
 * key is present in the single-use object store is rejected as expired.
 *
 * @param token single-use action token to check
 * @param context action token processing context (supplies the Keycloak session)
 * @throws VerificationException if the token was already used
 */
public static <T extends JsonWebToken & SingleUseObjectKeyModel> void checkTokenWasNotUsedYet(T token, ActionTokenContext<T> context) throws VerificationException {
    final SingleUseObjectProvider usedTokens = context.getSession().singleUseObjects();
    final boolean alreadyUsed = usedTokens.get(token.serializeKey()) != null;
    if (alreadyUsed) {
        throw new ExplainedTokenVerificationException(token, Errors.EXPIRED_CODE, Messages.EXPIRED_ACTION);
    }
}
}
| keycloak/keycloak | services/src/main/java/org/keycloak/services/resources/LoginActionsServiceChecks.java |
1,456 | /*
* This file is part of WebGoat, an Open Web Application Security Project utility. For details, please see http://www.owasp.org/
*
* Copyright (c) 2002 - 2021 Bruce Mayhew
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU General Public License as published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with this program; if
* not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*
* Getting Source
* ==============
*
* Source for this application is maintained at https://github.com/WebGoat/WebGoat, a repository for free software projects.
*/
package org.owasp.webgoat;
import java.util.Map;
import org.junit.jupiter.api.Test;
/**
 * Integration test for the session-management (HijackSession) lesson.
 *
 * @author Angel Olle Blazquez
 */
class SessionManagementIT extends IntegrationTest {

    private static final String HIJACK_SESSION_LESSON = "HijackSession";
    private static final String HIJACK_LOGIN_CONTEXT_PATH = HIJACK_SESSION_LESSON + "/login";

    @Test
    void hijackSessionTest() {
        startLesson(HIJACK_SESSION_LESSON);
        Map<String, String> credentials = Map.of("username", "webgoat", "password", "webgoat");
        // Expected flag is false — presumably these default credentials must not
        // solve the assignment; verify against IntegrationTest.checkAssignment.
        checkAssignment(url(HIJACK_LOGIN_CONTEXT_PATH), credentials, false);
    }
}
| LoudWatch/WebGoat | src/it/java/org/owasp/webgoat/SessionManagementIntegrationTest.java |
1,457 | /**
*
* Least Diff problem in Choco3.
*
* Minimize the difference ABCDE - FGHIJ
* where A..J is all different in the range 0..9.
*
* The solution is: 50123 - 49876 = 247
*
*
* Choco3 model by Hakan Kjellerstrand ([email protected])
* Also see http://www.hakank.org/choco3/
*
*/
import org.kohsuke.args4j.Option;
import org.slf4j.LoggerFactory;
import samples.AbstractProblem;
import solver.ResolutionPolicy;
import solver.Solver;
import solver.constraints.Constraint;
import solver.constraints.IntConstraintFactory;
import solver.search.strategy.IntStrategyFactory;
import solver.search.loop.monitors.SearchMonitorFactory;
import solver.variables.IntVar;
import solver.variables.BoolVar;
import solver.variables.VariableFactory;
import solver.search.strategy.strategy.AbstractStrategy;
import util.ESat;
import util.tools.ArrayUtils;
import java.util.*;
/**
 * Least Diff problem: minimize Diff = ABCDE - FGHIJ where the digits A..J are
 * pairwise distinct and each in 0..9. Optimal solution: 50123 - 49876 = 247.
 */
public class LeastDiff2 extends AbstractProblem {

    IntVar[] letters;
    IntVar A,B,C,D,E,F,G,H,I,J;
    IntVar Diff, X, Y;

    @Override
    public void createSolver() {
        solver = new Solver("LeastDiff2");
    }

    @Override
    public void buildModel () {
        // One 0..9 decision variable per letter A..J, created in the same order
        // (and with the same names) as the original hand-written declarations.
        final String letterNames = "ABCDEFGHIJ";
        letters = new IntVar[letterNames.length()];
        for (int i = 0; i < letterNames.length(); i++) {
            letters[i] = VariableFactory.enumerated(String.valueOf(letterNames.charAt(i)), 0, 9, solver);
        }
        A = letters[0]; B = letters[1]; C = letters[2]; D = letters[3]; E = letters[4];
        F = letters[5]; G = letters[6]; H = letters[7]; I = letters[8]; J = letters[9];

        // Objective variable and the two 5-digit numbers it is derived from.
        Diff = VariableFactory.bounded("Diff", 0, 1000, solver);
        X = VariableFactory.bounded("X", 0, 100000, solver);
        Y = VariableFactory.bounded("Y", 0, 100000, solver);

        // All ten digits are pairwise distinct (bounds-consistency propagator).
        solver.post(IntConstraintFactory.alldifferent(letters, "BC"));

        // Positional weights for a 5-digit decimal number.
        int[] decimalWeights = {10000, 1000, 100, 10, 1};
        // X = 10000*A + 1000*B + 100*C + 10*D + E
        solver.post(IntConstraintFactory.scalar(new IntVar[]{A,B,C,D,E}, decimalWeights, X));
        // Y = 10000*F + 1000*G + 100*H + 10*I + J
        solver.post(IntConstraintFactory.scalar(new IntVar[]{F,G,H,I,J}, decimalWeights, Y));
        // Diff = X - Y (sum of X and -Y)
        solver.post(IntConstraintFactory.sum(new IntVar[] {X, VariableFactory.minus(Y)}, Diff));
    }

    @Override
    public void configureSearch() {
        // Smallest-domain-first variable choice, smallest value first.
        solver.set(IntStrategyFactory.firstFail_InDomainMin(letters));
    }

    @Override
    public void solve() {
        solver.findOptimalSolution(ResolutionPolicy.MINIMIZE, Diff);
    }

    @Override
    public void prettyOut() {
        LoggerFactory.getLogger("bench").info("LeastDiff2");
        LoggerFactory.getLogger("bench").info("\n" + X.getValue() + "-" + Y.getValue() + " = " + Diff.getValue() + "\n");
    }

    public static void main(String[] args) {
        new LeastDiff2().execute(args);
    }
}
| hakank/hakank | choco3/LeastDiff2.java |
1,458 | /**
* FontGenerator3Pixel
* Copyright 2014 by Michael Christen
* First released 14.02.2014 at http://yacy.net
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program in the file lgpl21.txt
* If not, see <http://www.gnu.org/licenses/>.
*/
package net.yacy.visualization;
public class FontGenerator3Pixel {
/*
?0x20: !"#$%&'
0x28:()*+,-./
0x30:01234567
0x38:89:;<=>?
0x40:@ABCDEFG
0x48:HIJKLMNO
0x50:PQRSTUVW
0x58:XYZ[\]^_
0x60:`abcdefg
0x68:hijklmno
0x70:pqrstuvw
0x78:xyz{|}~
*/
private static final String[][] font =
{
{"...", //0x20
"...",
"..."},
{".X.",
".X.",
".+."},
{"X.X.",
"+.+.",
"...."},
{"+X+",
"X.X",
"+X+"},
{"+XX",
".X.",
"XX+"},
{".X..X",
"X.XX.",
".XXX.",
".XX.X",
"X..X."},
{"+XXX.",
"X+...",
"+X+X.",
"X.X+.",
"+X+X."},
{"..X..",
"..X..",
".....",
".....",
"....."},
{"....X", //0x28
"...X+",
"...X.",
"...X+",
"....X"},
{"X....",
"+X...",
".X...",
"+X...",
"x...."},
{".....",
".X.X.",
"++X++",
".x.X.",
"....."},
{".....",
"..X..",
".XXX.",
"..X..",
"....."},
{".....",
".....",
"..X..",
".+X..",
".X..."},
{".....",
".....",
".XXX.",
".....",
"....."},
{".....",
".....",
".....",
".....",
"..X.."},
{"....X",
"...X+",
"..X+.",
".X+..",
"X+..."},
{".XXX.", //0x30
"X+.XX",
"X.X.X",
"XX.+X",
".XXX."},
{"..X..",
".XX..",
"..X..",
"..X..",
".XXX."},
{".XXX.",
"X+.+X",
".+XX+",
"+X+..",
"XXXXX"},
{".XXX.",
"X+.+X",
"...X+",
"X+.+X",
".XXX."},
{".+XX.",
"+X+X.",
"XXXXX",
"...X.",
"...X."},
{"XXXXX",
"X....",
"XXXX.",
"...+X",
"XXXX."},
{".XXX.",
"X+...",
"XXXX+",
"X+.+X",
".XXX."},
{"XXXXX",
"..+X+",
"..X+.",
"..X..",
"..X.."},
{".XXX.", //0x38
"X+.+X",
"+XXX.",
"X+.+X",
".XXX."},
{".XXX.",
"X+.+X",
"+XXXX",
"...+X",
".XXX."},
{".....",
"..X..",
".....",
"..X..",
"....."},
{".....",
"..X..",
".....",
".+X..",
".X..."},
{"...X+",
"..X+.",
".X+..",
"..X+.",
"...X+"},
{".....",
"XXXXX",
".....",
"XXXXX",
"....."},
{"+X...",
".+X..",
"..+X.",
".+X..",
"+X..."},
{".XXX.",
"..+X.",
"..X..",
".....",
"..X.."},
{"+XXX+", //0x40
"XXX.X",
"X+XX+",
"X+...",
".XXX."},
{".+X+.",
"+X+X+",
"X+.+X",
"XXXXX",
"X...X"},
{"XXX+.",
"X.+X.",
"XXXX+",
"X..+X",
"XXXX."},
{".XXX.",
"X+...",
"X....",
"X+...",
".XXX."},
{"XXXX.",
"X..+X",
"X...X",
"X..+X",
"XXXX."},
{"XXXXX",
"X....",
"XXX..",
"X....",
"XXXXX"},
{"XXXXX",
"X....",
"XXX..",
"X....",
"X...."},
{".XXX.",
"X+...",
"X.XXX",
"X..+X",
".XXX."},
{"X...X", //0x48
"X...X",
"XXXXX",
"X...X",
"X...X"},
{"XXXXX",
"..X..",
"..X..",
"..X..",
"XXXXX"},
{"XXXXX",
"....X",
"....X",
"X+.+X",
".XXX."},
{"X..+X",
"X.+X.",
"XXX..",
"X.+X.",
"X..+X"},
{"X....",
"X....",
"X....",
"X....",
"XXXXX"},
{"X...X",
"XX.XX",
"X+X+X",
"X.+.X",
"X...X"},
{"X+..X",
"XX+.X",
"X+X+X",
"X.+XX",
"X..+X"},
{".XXX.",
"X+.+X",
"X...X",
"X+.+X",
".XXX."},
{"XXXX.", //0x50
"X..+X",
"XXXX.",
"X....",
"X...."},
{".XXX.",
"X+.+X",
"X.X+X",
"X++X+",
".XX+X"},
{"XXXX.",
"X..+X",
"XXXX.",
"X.+X.",
"X..+X"},
{".XXX+",
"X+...",
".XXX.",
"...+X",
"+XXX."},
{"XXXXX",
"..X..",
"..X..",
"..X..",
"..X.."},
{"X...X",
"X...X",
"X...X",
"X+.+X",
".XXX."},
{"X...X",
"X...X",
"X+.+X",
".X+X.",
"..X.."},
{"X...X",
"X...X",
"X.+.X",
"X+X+X",
".X.X."},
{"X+.+X", //0x58
"+X+X+",
".+X+.",
"+X+X+",
"X+.+X"},
{"X...X",
"+X.X+",
".+X+.",
"..X..",
"..X.."},
{"XXXXX",
"..+X+",
".+X+.",
"+X+..",
"XXXXX"},
{"..XXX",
"..X..",
"..X..",
"..X..",
"..XXX"},
{"X+...",
"+X+..",
".+X+.",
"..+X+",
"...+X"},
{"XXX..",
"..X..",
"..X..",
"..X..",
"XXX.."},
{".+X+.",
"+X+X+",
"X+.+X",
".....",
"....."},
{".....",
".....",
".....",
".....",
"XXXXX"},
{".X+..", //0x60
".+X..",
".....",
".....",
"....."},
{".....",
".....",
"+XXXX",
"X+..X",
".XXXX"},
{"X....",
"X....",
"XXXX.",
"X..+X",
"XXXX."},
{".....",
".....",
".XXXX",
"X+...",
".XXXX"},
{"....X",
"....X",
".XXXX",
"X+..X",
".XXXX"},
{".....",
"+XX+.",
"X.+X.",
"X+X+.",
".XXXX"},
{"..XX.",
"..X+.",
".XXX.",
"..X..",
"..X.."},
{".....",
".+XX+",
".X+.X",
".+X+X",
"XXXX."},
{"X....", //0x68
"X....",
"X+XX+",
"XX++X",
"X+..X"},
{"..X..",
".....",
"..X..",
"..X..",
"..X.."},
{"..X..",
".....",
"..X..",
".+X..",
".X+.."},
{"X....",
"X....",
"X.+XX",
"XXX+.",
"X.+XX"},
{"..X..",
"..X..",
"..X..",
"..X..",
"..X.."},
{".....",
".....",
".X+X.",
"X+X+X",
"X.X.X"},
{".....",
".....",
".XXX.",
"X+.+X",
"X...X"},
{".....",
".....",
".XXX.",
"X+.+X",
".XXX."},
{".....", //0x70
"XXXX.",
"X..+X",
"XXXX.",
"X...."},
{".....",
".XXXX",
"X+..X",
".XXXX",
"....X"},
{".....",
"..+X.",
"..X+.",
"..X..",
"..X.."},
{".....",
".....",
".+XX.",
"+X++X",
"X++X."},
{"..X..",
".XXX.",
"..X..",
"..X..",
"..X.."},
{".....",
".....",
"X...X",
"X+.+X",
".XXX."},
{".....",
".....",
"X+.+X",
"+X+X+",
".+X+."},
{".....",
".....",
"X.X.X",
"X+X+X",
".X+X."},
{".....", //0x78
".....",
".X+X.",
".+X+.",
".X+X."},
{".....",
".....",
".X+X.",
".+X+.",
"..X.."},
{".....",
".....",
".XXXX",
".+X+.",
"XXXX."},
{"...XX",
"..+X+",
".+X+.",
"..+X+",
"...XX"},
{"..X..",
"..X..",
"..X..",
"..X..",
"..X.."},
{"XX...",
"+X+..",
".+X+.",
"+X+..",
"XX..."},
{".....",
"+X+..",
"X+X+X",
"..+X+",
"....."},
{"XXXXX",
"X...X",
"X...X",
"X...X",
"XXXXX"},
};
/**
 * Emits one 56-bit hex literal per glyph, eight literals per output line.
 * Each glyph is packed row-major, 5 rows x 10 bits, 2 bits per pixel column:
 * '.' -> 00 (off), '+' -> 01 (half intensity), 'X' -> 11 (full intensity).
 * Any other character (e.g. the stray lowercase 'x' entries in the table)
 * counts as off, matching the original encoder's behavior.
 *
 * Fix: the table mixes glyph sizes — some entries have only 3 rows (e.g. the
 * space glyph) or 4-character rows ("X.X."). The original loop indexed
 * letter[j] / charAt(col) unconditionally and threw
 * ArrayIndexOutOfBoundsException on those glyphs; missing rows/columns are
 * now treated as blank. Output for well-formed 5x5 glyphs is unchanged.
 */
public static void main(final String[] args) {
    String[] letter;
    long b;
    long v;
    int c = 0;
    String s;
    for (int i = 0; i < font.length; i++) {
        letter = font[i];
        b = 0;
        for (int j = 0; j < 5; j++) {
            b = b << 10;           // make room for the next 10-bit row
            v = 1 << 9;            // weight of the leftmost pixel's high bit
            // Blank-pad glyphs that have fewer than 5 rows.
            String row = j < letter.length ? letter[j] : "";
            for (int col = 0; col < 5; col++) {
                // Blank-pad rows that have fewer than 5 columns.
                char pixel = col < row.length() ? row.charAt(col) : '.';
                if (pixel == '+') b += v;            // 01 — half intensity
                if (pixel == 'X') b += v + (v / 2);  // 11 — full intensity
                v = v >> 2;        // advance two bits to the next pixel
            }
        }
        s = Long.toHexString(b).toUpperCase();
        while (s.length() < 14) s = "0" + s;   // zero-pad to 56 bits
        System.out.print("0x" + s + "L,");
        c++;
        if (c >= 8) {
            System.out.println();
            c = 0;
        }
    }
}
}
| yacy/yacy_search_server | source/net/yacy/visualization/FontGenerator3Pixel.java |
1,459 | /*
* Tencent is pleased to support the open source community by making VasSonic available.
*
* Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
*
* https://opensource.org/licenses/BSD-3-Clause
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*
*
*/
package com.tencent.sonic.sdk;
import android.content.Intent;
import android.os.Looper;
import android.text.TextUtils;
import android.util.Log;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.SocketTimeoutException;
import java.net.URL;
import java.net.URLConnection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.zip.GZIPInputStream;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLSession;
/**
*
* The abstract class <code>SonicSessionConnection</code> is the superclass
* of all classes that represent a communications link between the
* application and a URL. Instances of this class can be used both to
* read from and to write to the resource referenced by the URL
*/
public abstract class SonicSessionConnection {
private static final String TAG = SonicConstants.SONIC_SDK_LOG_PREFIX + "SonicSessionConnection";
/**
* HTTP header:sonic-etag-key. <br>
* This header represents that the "eTag" key can be modified by service.
*/
public final static String CUSTOM_HEAD_FILED_SONIC_ETAG_KEY = "sonic-etag-key";
/**
* HTTP header:eTag. <br>
* This header represents SHA1 value of the whole website, including template and data.
*/
public final static String CUSTOM_HEAD_FILED_ETAG = "eTag";
/**
* HTTP header:accept-diff. <br>
* This header represents that client accepts data incremental scene updates or not.
*/
public final static String CUSTOM_HEAD_FILED_ACCEPT_DIFF = "accept-diff";
/**
* HTTP header:template_tag. <br>
* This header represents SHA1 value of the template file.
*/
public final static String CUSTOM_HEAD_FILED_TEMPLATE_TAG = "template-tag";
/**
* HTTP header:template_change. <br>
* This header indicates whether the template file has changed or not.
*/
public final static String CUSTOM_HEAD_FILED_TEMPLATE_CHANGE = "template-change";
/**
* HTTP header:cache-offline. <br>
* This header indicates whether the website needs to be refreshed or not.
*/
public final static String CUSTOM_HEAD_FILED_CACHE_OFFLINE = "cache-offline";
/**
* HTTP header:dns-prefetch-address <br>
* This header represents the ip address of the server. <br>
* Sonic Connection will use this ip to connect to server to avoid the cost time of DNS resolution.
*/
public final static String DNS_PREFETCH_ADDRESS = "dns-prefetch-address";
/**
* HTTP Header:sdk_version. <br>
* This header represents the version of SDK.
*/
public final static String CUSTOM_HEAD_FILED_SDK_VERSION = "sonic-sdk-version";
/**
* HTTP Header:dns-prefetch. <br>
* This header indicates that Sonic connection has used the ip represented by {@link #DNS_PREFETCH_ADDRESS}
*/
public final static String CUSTOM_HEAD_FILED_DNS_PREFETCH = "sonic-dns-prefetch";
/**
* HTTP header: . <br>
*
*/
public final static String CUSTOM_HEAD_FILED_HTML_SHA1 = "sonic-html-sha1";
/**
* HTTP Header:Content-Security-Policy. <br>
* This header represents the HTML CSP.
*/
public final static String HTTP_HEAD_CSP = "Content-Security-Policy";
/**
* HTTP Header:Content-Security-Policy-Report-Only. <br>
* This header represents the HTML Content-Security-Policy-Report-Only.
*/
public final static String HTTP_HEAD_CSP_REPORT_ONLY = "Content-Security-Policy-Report-Only";
/**
* HTTP Header:Set-Cookie. <br>
* This header represents the HTML Set-Cookie.
*/
public final static String HTTP_HEAD_FILED_SET_COOKIE = "Set-Cookie";
/**
* HTTP Header : Cache-Control. <br>
* This header represents the strategy of cache control.
*/
public final static String HTTP_HEAD_FIELD_CACHE_CONTROL = "Cache-Control";
/**
* HTTP Header : Expires. <br>
*/
public final static String HTTP_HEAD_FIELD_EXPIRES = "Expires";
/**
* HTTP 1.0 Header : Pragma. <br>
* This old header represents the old strategy of cache control.
*/
public final static String HTTP_HEAD_FIELD_PRAGMA = "Pragma"; //1.0
/**
* HTTP Header : Content-Type. <br>
*/
public final static String HTTP_HEAD_FIELD_CONTENT_TYPE = "Content-Type";
/**
* HTTP Header : Content-Length. <br>
*/
public final static String HTTP_HEAD_FIELD_CONTENT_LENGTH = "Content-Length";
/**
* HTTP Request Header : Cookie. <br>
*/
public final static String HTTP_HEAD_FIELD_COOKIE = "Cookie";
/**
* HTTP Request Header:User-Agent. <br>
*/
public final static String HTTP_HEAD_FILED_USER_AGENT = "User-Agent";
public final static String HTTP_HEAD_FILED_IF_NOT_MATCH = "If-None-Match";
/**
* HTTP Response Header: Link. <br>
*/
public final static String CUSTOM_HEAD_FILED_LINK = "sonic-link";
/**
* SonicSession Object used by SonicSessionConnection.
*/
protected final SonicSession session;
/**
* This intent saves all of the initialization param.
*/
protected final Intent intent;
/**
* The input stream that reads from this open connection.
*/
protected BufferedInputStream responseStream;
/**
* The field that reads from the headerFields of this open connection.
*/
protected String mCustomHeadFieldEtagKey;
/**
 * Constructor.
 *
 * @param session The SonicSession instance this connection belongs to
 * @param intent carries the initialization parameters (ETag, template tag, cookies,
 *               User-Agent, DNS-prefetch address); a fresh empty Intent is substituted
 *               when {@code null} so later getStringExtra calls are always safe
 */
public SonicSessionConnection(SonicSession session, Intent intent) {
    this.session = session;
    this.intent = intent != null ? intent : new Intent();
}
/**
 * Opens a communications link to the resource referenced by this Sonic session.
 * Synchronized so concurrent callers cannot connect simultaneously.
 *
 * @return a Sonic error code as produced by {@link #internalConnect()}
 *         ({@code SonicConstants.ERROR_CODE_SUCCESS} on success)
 */
public synchronized int connect() {
    return internalConnect();
}
/**
* Disconnect the communications link to the resource referenced by Sonic session
*/
public abstract void disconnect();
public abstract int getResponseCode();
public abstract Map<String, List<String>> getResponseHeaderFields();
/**
*
* @param key the name of a header field.
* @return Returns the value of the named header field.
*/
public abstract String getResponseHeaderField(String key);
/**
 * Returns a buffered stream over this connection's response body, creating it
 * lazily on first access and caching it for subsequent calls.
 *
 * @return the cached response stream, or {@code null} if it could not be opened
 */
public synchronized BufferedInputStream getResponseStream() {
    BufferedInputStream stream = responseStream;
    if (stream == null) {
        stream = internalGetResponseStream();
        responseStream = stream;
    }
    return stream;
}
protected abstract int internalConnect();
protected abstract BufferedInputStream internalGetResponseStream();
/**
 * Returns the header name under which the server publishes its ETag, resolving it
 * lazily via {@link #internalGetCustomHeadFieldEtag()} and caching the result.
 */
public String getCustomHeadFieldEtagKey() {
    String etagKey = mCustomHeadFieldEtagKey;
    if (TextUtils.isEmpty(etagKey)) {
        etagKey = internalGetCustomHeadFieldEtag();
        mCustomHeadFieldEtagKey = etagKey;
    }
    return etagKey;
}
protected abstract String internalGetCustomHeadFieldEtag();
public static class SessionConnectionDefaultImpl extends SonicSessionConnection {
/**
* A default http connection referred to by the {@code com.tencent.sonic.sdk.SonicSession#currUrl}.
*/
protected final URLConnection connectionImpl;
/**
 * Creates the default {@link URLConnection}-backed implementation. The underlying
 * connection is created and configured eagerly; when creation fails,
 * {@code connectionImpl} stays {@code null} and the other methods of this class
 * null-check it and return error codes / {@code null} instead of throwing.
 *
 * @param session the owning Sonic session (supplies the target URL)
 * @param intent initialization parameters (DNS-prefetch address, cookies, UA, ...)
 */
public SessionConnectionDefaultImpl(SonicSession session, Intent intent) {
    super(session, intent);
    connectionImpl = createConnection();
    initConnection(connectionImpl);
}
/**
 * Builds the {@link URLConnection} for the session's source URL. When a DNS-prefetch
 * IP is supplied via the intent, the URL's host is replaced by that IP and the real
 * host name is preserved in the {@code Host} header; for HTTPS this additionally
 * installs an SNI-aware socket factory and a hostname verifier that validates the
 * certificate against the original host.
 *
 * @return the configured connection, or {@code null} when the URL is empty or
 *         connection creation failed
 */
protected URLConnection createConnection() {
    String currentUrl = session.srcUrl;
    if (TextUtils.isEmpty(currentUrl)) {
        return null;
    }
    URLConnection connection = null;
    try {
        URL url = new URL(currentUrl);
        String dnsPrefetchAddress = intent.getStringExtra(SonicSessionConnection.DNS_PREFETCH_ADDRESS);
        String originHost = null;
        /*
         * Use the ip value mapped by {@code SonicSessionConnection.DNS_PREFETCH_ADDRESS} to avoid the cost time of DNS resolution.
         * Meanwhile it can reduce the risk from hijacking http session.
         */
        if (!TextUtils.isEmpty(dnsPrefetchAddress)) {
            originHost = url.getHost();
            // NOTE(review): String.replace swaps every occurrence of the host substring
            // in the URL, not just the authority — confirm the host string cannot also
            // appear in the path/query of a session URL.
            url = new URL(currentUrl.replace(originHost, dnsPrefetchAddress));
            SonicUtils.log(TAG, Log.INFO, "create UrlConnection with DNS-Prefetch(" + originHost + " -> " + dnsPrefetchAddress + ").");
        }
        connection = url.openConnection();
        if (connection != null) {
            if (connection instanceof HttpURLConnection) {
                // Redirects are handled by the caller, not followed transparently.
                ((HttpURLConnection) connection).setInstanceFollowRedirects(false);
            }
            if (!TextUtils.isEmpty(originHost)) {
                /*
                 * If originHost is not empty, that means connection uses the ip value instead of http host.
                 * So http header need to set the Host and {@link com.tencent.sonic.sdk.SonicSessionConnection.CUSTOM_HEAD_FILED_DNS_PREFETCH} request property.
                 */
                connection.setRequestProperty("Host", originHost);
                connection.setRequestProperty(SonicSessionConnection.CUSTOM_HEAD_FILED_DNS_PREFETCH, url.getHost());
                if (connection instanceof HttpsURLConnection) { // HTTPS needs extra handling, e.g. SNI support.
                    /*
                     * If the scheme of url is https, then it needs extra processing, such as the sni support.
                     */
                    final String finalOriginHost = originHost;
                    final URL finalUrl = url;
                    HttpsURLConnection httpsConnection = (HttpsURLConnection) connection;
                    httpsConnection.setSSLSocketFactory(new SonicSniSSLSocketFactory(SonicEngine.getInstance().getRuntime().getContext(), originHost));
                    httpsConnection.setHostnameVerifier(new HostnameVerifier() {
                        @Override
                        public boolean verify(String hostname, SSLSession session) {
                            boolean verifySuccess = false;
                            long startTime = System.currentTimeMillis();
                            // The connection is made to the prefetched IP, so verify the
                            // certificate against the original host name instead.
                            if (finalUrl.getHost().equals(hostname)) {
                                verifySuccess = HttpsURLConnection.getDefaultHostnameVerifier().verify(finalOriginHost, session);
                                SonicUtils.log(TAG, Log.DEBUG, "verify hostname cost " + (System.currentTimeMillis() - startTime) + " ms.");
                            }
                            return verifySuccess;
                        }
                    });
                }
            }
        }
    } catch (Throwable e) {
        // Any failure yields a null connection; callers treat that as "unavailable".
        if (connection != null) {
            connection = null;
        }
        SonicUtils.log(TAG, Log.ERROR, "create UrlConnection fail, error:" + e.getMessage() + ".");
    }
    return connection;
}
/**
 * Populates timeouts and the request headers required by the Sonic protocol
 * (accept-diff, If-None-Match, template-tag, SDK version), plus cookies, any
 * configured custom headers and the User-Agent taken from the intent.
 *
 * @param connection the connection to configure; ignored when {@code null}
 * @return {@code true} if the connection was configured, {@code false} when it was {@code null}
 */
protected boolean initConnection(URLConnection connection) {
    if (null != connection) {
        SonicSessionConfig config = session.config;
        connection.setConnectTimeout(config.CONNECT_TIMEOUT_MILLIS);
        connection.setReadTimeout(config.READ_TIMEOUT_MILLIS);
        /*
         * {@link SonicSessionConnection#CUSTOM_HEAD_FILED_ACCEPT_DIFF} is need to be set If client accepts incrementally updates. <br>
         * <p><b>Note: It doesn't support incrementally updated for template file.</b><p/>
         */
        connection.setRequestProperty(CUSTOM_HEAD_FILED_ACCEPT_DIFF, config.ACCEPT_DIFF_DATA ? "true" : "false");
        // String eTag = intent.getStringExtra(getCustomHeadFieldEtagKey());
        // NOTE(review): mCustomHeadFieldEtagKey is normally resolved from *response*
        // headers; at request-building time it may still be empty, so the default
        // "eTag" extra key is used — confirm this ordering is intended.
        String eTag = intent.getStringExtra(!TextUtils.isEmpty(mCustomHeadFieldEtagKey) ? mCustomHeadFieldEtagKey : CUSTOM_HEAD_FILED_ETAG);
        if (null == eTag) eTag = "";
        connection.setRequestProperty(HTTP_HEAD_FILED_IF_NOT_MATCH, eTag);
        String templateTag = intent.getStringExtra(CUSTOM_HEAD_FILED_TEMPLATE_TAG);
        if (null == templateTag) templateTag = "";
        connection.setRequestProperty(CUSTOM_HEAD_FILED_TEMPLATE_TAG, templateTag);
        // NOTE(review): this sends "method: GET" as a literal request header — it does
        // not change the HTTP verb (that would need HttpURLConnection#setRequestMethod,
        // and GET is already the default). Confirm whether the server expects it.
        connection.setRequestProperty("method", "GET");
        connection.setRequestProperty("Accept-Encoding", "gzip");
        connection.setRequestProperty("Accept-Language", "zh-CN,zh;");
        connection.setRequestProperty(CUSTOM_HEAD_FILED_SDK_VERSION, "Sonic/" + SonicConstants.SONIC_VERSION_NUM);
        // set custom request headers
        if (null != config.customRequestHeaders && 0 != config.customRequestHeaders.size()) {
            for (Map.Entry<String, String> entry : config.customRequestHeaders.entrySet()) {
                connection.setRequestProperty(entry.getKey(), entry.getValue());
            }
        }
        String cookie = intent.getStringExtra(HTTP_HEAD_FIELD_COOKIE);
        if (!TextUtils.isEmpty(cookie)) {
            connection.setRequestProperty(HTTP_HEAD_FIELD_COOKIE, cookie);
        } else {
            SonicUtils.log(TAG, Log.ERROR, "create UrlConnection cookie is empty");
        }
        connection.setRequestProperty(HTTP_HEAD_FILED_USER_AGENT, intent.getStringExtra(HTTP_HEAD_FILED_USER_AGENT));
        return true;
    }
    return false;
}
/**
 * Connects the underlying {@link HttpURLConnection} and maps failures to Sonic
 * error codes. Non-HTTP connections (including a null one) yield
 * {@code ERROR_CODE_UNKNOWN}.
 */
@Override
protected synchronized int internalConnect() {
    if (!(connectionImpl instanceof HttpURLConnection)) {
        return SonicConstants.ERROR_CODE_UNKNOWN;
    }
    HttpURLConnection httpURLConnection = (HttpURLConnection) connectionImpl;
    try {
        httpURLConnection.connect();
        return SonicConstants.ERROR_CODE_SUCCESS;
    } catch (Throwable e) {
        SonicUtils.log(TAG, Log.ERROR, "connect error:" + e.getMessage());
        return mapConnectionError(e);
    }
}

/** Maps a connection failure to the matching Sonic error code. */
private static int mapConnectionError(Throwable e) {
    String errMsg = e.getMessage();
    if (e instanceof IOException) {
        // Timeouts (typed or reported via message text) are flagged separately.
        if (e instanceof SocketTimeoutException) {
            return SonicConstants.ERROR_CODE_CONNECT_TOE;
        }
        if (!TextUtils.isEmpty(errMsg) && errMsg.contains("timeoutexception")) {
            return SonicConstants.ERROR_CODE_CONNECT_TOE;
        }
        return SonicConstants.ERROR_CODE_CONNECT_IOE;
    }
    if (e instanceof NullPointerException) {
        return SonicConstants.ERROR_CODE_CONNECT_NPE;
    }
    return SonicConstants.ERROR_CODE_UNKNOWN;
}
/**
 * Disconnects the underlying {@link HttpURLConnection}, if any. Because network
 * teardown may block, the call is posted to a worker thread when invoked on the
 * Android main (UI) thread; otherwise it runs inline.
 */
@Override
public void disconnect() {
    if (connectionImpl instanceof HttpURLConnection) {
        final HttpURLConnection httpURLConnection = (HttpURLConnection) connectionImpl;
        if (Looper.myLooper() == Looper.getMainLooper()) {
            // On the UI thread: hand the blocking disconnect off to the runtime's pool.
            SonicEngine.getInstance().getRuntime().postTaskToThread(new Runnable() {
                @Override
                public void run() {
                    try {
                        httpURLConnection.disconnect();
                    } catch (Throwable e) {
                        SonicUtils.log(TAG, Log.ERROR, "disconnect error:" + e.getMessage());
                    }
                }
            }, 0);
        } else {
            try {
                httpURLConnection.disconnect();
            } catch (Exception e) {
                // Best-effort teardown: failures are only logged.
                SonicUtils.log(TAG, Log.ERROR, "disconnect error:" + e.getMessage());
            }
        }
    }
}
/**
 * Opens (once) a buffered stream over the connection's response body, transparently
 * un-gzipping it when the server reports {@code Content-Encoding: gzip}. Failures are
 * logged and leave the cached stream {@code null}.
 */
@Override
protected BufferedInputStream internalGetResponseStream() {
    if (responseStream == null && connectionImpl != null) {
        try {
            InputStream rawStream = connectionImpl.getInputStream();
            boolean gzipped = "gzip".equalsIgnoreCase(connectionImpl.getContentEncoding());
            responseStream = gzipped
                    ? new BufferedInputStream(new GZIPInputStream(rawStream))
                    : new BufferedInputStream(rawStream);
        } catch (Throwable e) {
            SonicUtils.log(TAG, Log.ERROR, "getResponseStream error:" + e.getMessage() + ".");
        }
    }
    return responseStream;
}
/**
 * Returns the HTTP status code of the response, or a (negative) Sonic error code
 * when the connection is not an HTTP connection or reading the status fails.
 */
@Override
public int getResponseCode() {
    if (!(connectionImpl instanceof HttpURLConnection)) {
        return SonicConstants.ERROR_CODE_UNKNOWN;
    }
    try {
        return ((HttpURLConnection) connectionImpl).getResponseCode();
    } catch (Throwable e) {
        String message = e.getMessage();
        SonicUtils.log(TAG, Log.ERROR, "getResponseCode error:" + message);
        // SocketTimeoutException is an IOException, so check it (and message-reported
        // timeouts) before the generic IO case.
        if (e instanceof SocketTimeoutException) {
            return SonicConstants.ERROR_CODE_CONNECT_TOE;
        }
        if (e instanceof IOException) {
            return (!TextUtils.isEmpty(message) && message.contains("timeoutexception"))
                    ? SonicConstants.ERROR_CODE_CONNECT_TOE
                    : SonicConstants.ERROR_CODE_CONNECT_IOE;
        }
        return e instanceof NullPointerException
                ? SonicConstants.ERROR_CODE_CONNECT_NPE
                : SonicConstants.ERROR_CODE_UNKNOWN;
    }
}
/**
 * Returns the response headers of the underlying connection.
 *
 * @return the header map; {@code null} when no connection exists; an empty map
 *         when reading the headers throws
 */
@Override
public Map<String, List<String>> getResponseHeaderFields() {
    if (connectionImpl == null) {
        return null;
    }
    try {
        return connectionImpl.getHeaderFields();
    } catch (Throwable e) {
        SonicUtils.log(TAG, Log.ERROR, "getHeaderFields error:" + e.getMessage());
        return new HashMap<String, List<String>>();
    }
}
/**
 * Returns the value of the named response header, joining multiple values with ','.
 * <p>
 * Lookup is case-insensitive: depending on the platform, {@link URLConnection}
 * implementations expose header-map keys in original case or lower case, so this
 * method first tries a direct hit and then falls back to a case-insensitive scan.
 * This also avoids the locale-sensitive {@code String.toLowerCase()} the previous
 * implementation relied on (e.g. the Turkish dotless-i mapping).
 *
 * @param key the name of the header field (case-insensitive)
 * @return the joined header value, or {@code null} when the header is absent
 */
@Override
public String getResponseHeaderField(String key) {
    Map<String, List<String>> responseHeaderFields = getResponseHeaderFields();
    if (null == responseHeaderFields || 0 == responseHeaderFields.size() || TextUtils.isEmpty(key)) {
        return null;
    }
    List<String> responseHeaderValues = responseHeaderFields.get(key);
    if (null == responseHeaderValues) {
        // Case-insensitive fallback; equalsIgnoreCase(null) is false, so the
        // status-line entry (null key) is skipped safely.
        for (Map.Entry<String, List<String>> entry : responseHeaderFields.entrySet()) {
            if (key.equalsIgnoreCase(entry.getKey())) {
                responseHeaderValues = entry.getValue();
                break;
            }
        }
    }
    if (null == responseHeaderValues || 0 == responseHeaderValues.size()) {
        return null;
    }
    StringBuilder stringBuilder = new StringBuilder(responseHeaderValues.get(0));
    for (int index = 1, size = responseHeaderValues.size(); index < size; ++index) {
        stringBuilder.append(',').append(responseHeaderValues.get(index));
    }
    return stringBuilder.toString();
}
/**
 * Resolves the header name the server uses for its ETag: the value of the
 * "sonic-etag-key" response header when present, otherwise the default "eTag".
 */
@Override
protected String internalGetCustomHeadFieldEtag() {
    String sonicEtagValue = getResponseHeaderField(CUSTOM_HEAD_FILED_SONIC_ETAG_KEY);
    SonicUtils.log(TAG, Log.INFO, "internalGetCustomHeadFieldEtag ~ sonicEtag:" + sonicEtagValue);
    return TextUtils.isEmpty(sonicEtagValue) ? CUSTOM_HEAD_FILED_ETAG : sonicEtagValue;
}
}
} | Tencent/VasSonic | sonic-android/sdk/src/main/java/com/tencent/sonic/sdk/SonicSessionConnection.java |
1,460 | /**
* FontGenerator5Pixel
* Copyright 2005 by Michael Christen
* First released 31.10.2005 at http://yacy.net
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program in the file lgpl21.txt
* If not, see <http://www.gnu.org/licenses/>.
*/
package net.yacy.visualization;
public class FontGenerator5Pixel {

    /*
     * Glyph coverage (printable ASCII):
     *   0x20:  !"#$%&'    0x28: ()*+,-./
     *   0x30: 01234567    0x38: 89:;<=>?
     *   0x40: @ABCDEFG    0x48: HIJKLMNO
     *   0x50: PQRSTUVW    0x58: XYZ[\]^_
     *   0x60: `abcdefg    0x68: hijklmno
     *   0x70: pqrstuvw    0x78: xyz{|}~ plus a box glyph for 0x7F
     */

    /**
     * 5x5 pixel glyphs, one string per row:
     * '.' = pixel off, '+' = half intensity, 'X' = full intensity.
     * Any other character is treated as off by the encoder in {@link #main}.
     */
    private static final String[][] font = {
            {".....", ".....", ".....", ".....", "....."}, // 0x20 ' '
            {"..X..", "..X..", "..X..", ".....", "..X.."}, // 0x21 '!'
            {".X.X.", ".X.X.", ".....", ".....", "....."}, // 0x22 '"'
            {".X.X.", "XXXXX", ".X.X.", "XXXXX", ".X.X."}, // 0x23 '#'
            {"+XXXX", "X.X..", "+XXX+", "..X.X", "XXXX+"}, // 0x24 '$'
            {".X..X", "X.XX.", ".XXX.", ".XX.X", "X..X."}, // 0x25 '%'
            {"+XXX.", "X+...", "+X+X.", "X.X+.", "+X+X."}, // 0x26 '&'
            {"..X..", "..X..", ".....", ".....", "....."}, // 0x27 '\''
            {"....X", "...X+", "...X.", "...X+", "....X"}, // 0x28 '('
            {"X....", "+X...", ".X...", "+X...", "X...."}, // 0x29 ')' (fixed: last row had lowercase 'x', which rendered as off)
            {".....", ".X.X.", "++X++", ".X.X.", "....."}, // 0x2A '*' (fixed: row 4 had lowercase 'x', which rendered as off)
            {".....", "..X..", ".XXX.", "..X..", "....."}, // 0x2B '+'
            {".....", ".....", "..X..", ".+X..", ".X..."}, // 0x2C ','
            {".....", ".....", ".XXX.", ".....", "....."}, // 0x2D '-'
            {".....", ".....", ".....", ".....", "..X.."}, // 0x2E '.'
            {"....X", "...X+", "..X+.", ".X+..", "X+..."}, // 0x2F '/'
            {".XXX.", "X+.XX", "X.X.X", "XX.+X", ".XXX."}, // 0x30 '0'
            {"..X..", ".XX..", "..X..", "..X..", ".XXX."}, // 0x31 '1'
            {".XXX.", "X+.+X", ".+XX+", "+X+..", "XXXXX"}, // 0x32 '2'
            {".XXX.", "X+.+X", "...X+", "X+.+X", ".XXX."}, // 0x33 '3'
            {".+XX.", "+X+X.", "XXXXX", "...X.", "...X."}, // 0x34 '4'
            {"XXXXX", "X....", "XXXX.", "...+X", "XXXX."}, // 0x35 '5'
            {".XXX.", "X+...", "XXXX+", "X+.+X", ".XXX."}, // 0x36 '6'
            {"XXXXX", "..+X+", "..X+.", "..X..", "..X.."}, // 0x37 '7'
            {".XXX.", "X+.+X", "+XXX.", "X+.+X", ".XXX."}, // 0x38 '8'
            {".XXX.", "X+.+X", "+XXXX", "...+X", ".XXX."}, // 0x39 '9'
            {".....", "..X..", ".....", "..X..", "....."}, // 0x3A ':'
            {".....", "..X..", ".....", ".+X..", ".X..."}, // 0x3B ';'
            {"...X+", "..X+.", ".X+..", "..X+.", "...X+"}, // 0x3C '<'
            {".....", "XXXXX", ".....", "XXXXX", "....."}, // 0x3D '='
            {"+X...", ".+X..", "..+X.", ".+X..", "+X..."}, // 0x3E '>'
            {".XXX.", "..+X.", "..X..", ".....", "..X.."}, // 0x3F '?'
            {"+XXX+", "XXX.X", "X+XX+", "X+...", ".XXX."}, // 0x40 '@'
            {".+X+.", "+X+X+", "X+.+X", "XXXXX", "X...X"}, // 0x41 'A'
            {"XXX+.", "X.+X.", "XXXX+", "X..+X", "XXXX."}, // 0x42 'B'
            {".XXX.", "X+...", "X....", "X+...", ".XXX."}, // 0x43 'C'
            {"XXXX.", "X..+X", "X...X", "X..+X", "XXXX."}, // 0x44 'D'
            {"XXXXX", "X....", "XXX..", "X....", "XXXXX"}, // 0x45 'E'
            {"XXXXX", "X....", "XXX..", "X....", "X...."}, // 0x46 'F'
            {".XXX.", "X+...", "X.XXX", "X..+X", ".XXX."}, // 0x47 'G'
            {"X...X", "X...X", "XXXXX", "X...X", "X...X"}, // 0x48 'H'
            {"XXXXX", "..X..", "..X..", "..X..", "XXXXX"}, // 0x49 'I'
            {"XXXXX", "....X", "....X", "X+.+X", ".XXX."}, // 0x4A 'J'
            {"X..+X", "X.+X.", "XXX..", "X.+X.", "X..+X"}, // 0x4B 'K'
            {"X....", "X....", "X....", "X....", "XXXXX"}, // 0x4C 'L'
            {"X...X", "XX.XX", "X+X+X", "X.+.X", "X...X"}, // 0x4D 'M'
            {"X+..X", "XX+.X", "X+X+X", "X.+XX", "X..+X"}, // 0x4E 'N'
            {".XXX.", "X+.+X", "X...X", "X+.+X", ".XXX."}, // 0x4F 'O'
            {"XXXX.", "X..+X", "XXXX.", "X....", "X...."}, // 0x50 'P'
            {".XXX.", "X+.+X", "X.X+X", "X++X+", ".XX+X"}, // 0x51 'Q'
            {"XXXX.", "X..+X", "XXXX.", "X.+X.", "X..+X"}, // 0x52 'R'
            {".XXX+", "X+...", ".XXX.", "...+X", "+XXX."}, // 0x53 'S'
            {"XXXXX", "..X..", "..X..", "..X..", "..X.."}, // 0x54 'T'
            {"X...X", "X...X", "X...X", "X+.+X", ".XXX."}, // 0x55 'U'
            {"X...X", "X...X", "X+.+X", ".X+X.", "..X.."}, // 0x56 'V'
            {"X...X", "X...X", "X.+.X", "X+X+X", ".X.X."}, // 0x57 'W'
            {"X+.+X", "+X+X+", ".+X+.", "+X+X+", "X+.+X"}, // 0x58 'X'
            {"X...X", "+X.X+", ".+X+.", "..X..", "..X.."}, // 0x59 'Y'
            {"XXXXX", "..+X+", ".+X+.", "+X+..", "XXXXX"}, // 0x5A 'Z'
            {"..XXX", "..X..", "..X..", "..X..", "..XXX"}, // 0x5B '['
            {"X+...", "+X+..", ".+X+.", "..+X+", "...+X"}, // 0x5C '\\'
            {"XXX..", "..X..", "..X..", "..X..", "XXX.."}, // 0x5D ']'
            {".+X+.", "+X+X+", "X+.+X", ".....", "....."}, // 0x5E '^'
            {".....", ".....", ".....", ".....", "XXXXX"}, // 0x5F '_'
            {".X+..", ".+X..", ".....", ".....", "....."}, // 0x60 '`'
            {".....", ".....", "+XXXX", "X+..X", ".XXXX"}, // 0x61 'a'
            {"X....", "X....", "XXXX.", "X..+X", "XXXX."}, // 0x62 'b'
            {".....", ".....", ".XXXX", "X+...", ".XXXX"}, // 0x63 'c'
            {"....X", "....X", ".XXXX", "X+..X", ".XXXX"}, // 0x64 'd'
            {".....", "+XX+.", "X.+X.", "X+X+.", ".XXXX"}, // 0x65 'e'
            {"..XX.", "..X+.", ".XXX.", "..X..", "..X.."}, // 0x66 'f'
            {".....", ".+XX+", ".X+.X", ".+X+X", "XXXX."}, // 0x67 'g'
            {"X....", "X....", "X+XX+", "XX++X", "X+..X"}, // 0x68 'h'
            {"..X..", ".....", "..X..", "..X..", "..X.."}, // 0x69 'i'
            {"..X..", ".....", "..X..", ".+X..", ".X+.."}, // 0x6A 'j'
            {"X....", "X....", "X.+XX", "XXX+.", "X.+XX"}, // 0x6B 'k'
            {"..X..", "..X..", "..X..", "..X..", "..X.."}, // 0x6C 'l'
            {".....", ".....", ".X+X.", "X+X+X", "X.X.X"}, // 0x6D 'm'
            {".....", ".....", ".XXX.", "X+.+X", "X...X"}, // 0x6E 'n'
            {".....", ".....", ".XXX.", "X+.+X", ".XXX."}, // 0x6F 'o'
            {".....", "XXXX.", "X..+X", "XXXX.", "X...."}, // 0x70 'p'
            {".....", ".XXXX", "X+..X", ".XXXX", "....X"}, // 0x71 'q'
            {".....", "..+X.", "..X+.", "..X..", "..X.."}, // 0x72 'r'
            {".....", ".....", ".+XX.", "+X++X", "X++X."}, // 0x73 's'
            {"..X..", ".XXX.", "..X..", "..X..", "..X.."}, // 0x74 't'
            {".....", ".....", "X...X", "X+.+X", ".XXX."}, // 0x75 'u'
            {".....", ".....", "X+.+X", "+X+X+", ".+X+."}, // 0x76 'v'
            {".....", ".....", "X.X.X", "X+X+X", ".X+X."}, // 0x77 'w'
            {".....", ".....", ".X+X.", ".+X+.", ".X+X."}, // 0x78 'x'
            {".....", ".....", ".X+X.", ".+X+.", "..X.."}, // 0x79 'y'
            {".....", ".....", ".XXXX", ".+X+.", "XXXX."}, // 0x7A 'z'
            {"...XX", "..+X+", ".+X+.", "..+X+", "...XX"}, // 0x7B '{'
            {"..X..", "..X..", "..X..", "..X..", "..X.."}, // 0x7C '|'
            {"XX...", "+X+..", ".+X+.", "+X+..", "XX..."}, // 0x7D '}'
            {".....", "+X+..", "X+X+X", "..+X+", "....."}, // 0x7E '~'
            {"XXXXX", "X...X", "X...X", "X...X", "XXXXX"}, // 0x7F box
    };

    /**
     * Packs every glyph into a 50-bit value (5 rows x 5 columns, 2 bits per
     * pixel: 00 = off, 10 = half intensity, 11 = full intensity) and prints
     * the results as Java {@code long} array literals, eight entries per line.
     *
     * @param args unused
     */
    public static void main(final String[] args) {
        int entriesOnLine = 0;
        for (final String[] glyph : font) {
            long bits = 0L;
            for (int row = 0; row < 5; row++) {
                bits <<= 10; // make room for the next 10-bit row
                long cell = 1L << 9; // high bit of column 0's 2-bit cell
                for (int col = 0; col < 5; col++) {
                    final char pixel = glyph[row].charAt(col);
                    if (pixel == '+') bits += cell;              // 10 = half intensity
                    if (pixel == 'X') bits += cell + (cell / 2); // 11 = full intensity
                    cell >>= 2;
                }
            }
            // 50 bits need at most 13 hex digits; pad to 14 for uniform columns.
            System.out.print(String.format("0x%014XL,", bits));
            entriesOnLine++;
            if (entriesOnLine >= 8) {
                System.out.println();
                entriesOnLine = 0;
            }
        }
    }
}
| yacy/yacy_search_server | source/net/yacy/visualization/FontGenerator5Pixel.java |
1,461 | package com.sothree.slidinguppanel;
import android.annotation.SuppressLint;
import android.content.Context;
import android.content.res.TypedArray;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.PixelFormat;
import android.graphics.Rect;
import android.graphics.drawable.Drawable;
import android.os.Bundle;
import android.os.Parcelable;
import android.support.v4.view.MotionEventCompat;
import android.support.v4.view.ViewCompat;
import android.util.AttributeSet;
import android.util.Log;
import android.view.Gravity;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import android.view.accessibility.AccessibilityEvent;
import android.view.animation.AnimationUtils;
import android.view.animation.Interpolator;
import com.sothree.slidinguppanel.library.R;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
public class SlidingUpPanelLayout extends ViewGroup {
    /** Log tag derived from the class name. */
    private static final String TAG = SlidingUpPanelLayout.class.getSimpleName();
    /**
     * Default peeking out panel height
     */
    private static final int DEFAULT_PANEL_HEIGHT = 68; // dp;
    /**
     * Default anchor point height
     */
    private static final float DEFAULT_ANCHOR_POINT = 1.0f; // In relative %
    /**
     * Default initial state for the component
     */
    private static PanelState DEFAULT_SLIDE_STATE = PanelState.COLLAPSED; // NOTE(review): mutable static; could be final
    /**
     * Default height of the shadow above the peeking out panel
     */
    private static final int DEFAULT_SHADOW_HEIGHT = 4; // dp;
    /**
     * If no fade color is given by default it will fade to 80% gray.
     */
    private static final int DEFAULT_FADE_COLOR = 0x99000000;
    /**
     * Default Minimum velocity that will be detected as a fling
     */
    private static final int DEFAULT_MIN_FLING_VELOCITY = 400; // dips per second
    /**
     * Default is set to false because that is how it was written
     */
    private static final boolean DEFAULT_OVERLAY_FLAG = false;
    /**
     * Default is set to true for clip panel for performance reasons
     */
    private static final boolean DEFAULT_CLIP_PANEL_FLAG = true;
    /**
     * Default attributes for layout
     */
    private static final int[] DEFAULT_ATTRS = new int[]{
            android.R.attr.gravity
    };
    /**
     * Tag for the sliding state stored inside the bundle
     */
    public static final String SLIDING_STATE = "sliding_state";
    /**
     * Minimum velocity that will be detected as a fling
     */
    private int mMinFlingVelocity = DEFAULT_MIN_FLING_VELOCITY;
    /**
     * The fade color used for the panel covered by the slider. 0 = no fading.
     */
    private int mCoveredFadeColor = DEFAULT_FADE_COLOR;
    /**
     * Default parallax length of the main view
     */
    private static final int DEFAULT_PARALLAX_OFFSET = 0;
    /**
     * The paint used to dim the main layout when sliding
     */
    private final Paint mCoveredFadePaint = new Paint();
    /**
     * Drawable used to draw the shadow between panes.
     */
    private final Drawable mShadowDrawable;
    /**
     * The size of the overhang in pixels.
     */
    private int mPanelHeight = -1; // -1 = unset; resolved from the dp default in the constructor
    /**
     * The size of the shadow in pixels.
     */
    private int mShadowHeight = -1; // -1 = unset; resolved from the dp default in the constructor
    /**
     * Parallax offset
     */
    private int mParallaxOffset = -1; // -1 = unset; resolved from the dp default in the constructor
    /**
     * True if the collapsed panel should be dragged up.
     */
    private boolean mIsSlidingUp;
    /**
     * Panel overlays the windows instead of putting it underneath it.
     */
    private boolean mOverlayContent = DEFAULT_OVERLAY_FLAG;
    /**
     * The main view is clipped to the main top border
     */
    private boolean mClipPanel = DEFAULT_CLIP_PANEL_FLAG;
    /**
     * If provided, the panel can be dragged by only this view. Otherwise, the entire panel can be
     * used for dragging.
     */
    private View mDragView;
    /**
     * If provided, the panel can be dragged by only this view. Otherwise, the entire panel can be
     * used for dragging.
     */
    private int mDragViewResId = -1;
    /**
     * If provided, the panel will transfer the scroll from this view to itself when needed.
     */
    private View mScrollableView;
    private int mScrollableViewResId;
    private ScrollableViewHelper mScrollableViewHelper = new ScrollableViewHelper();
    /**
     * The child view that can slide, if any.
     */
    private View mSlideableView;
    /**
     * The main view
     */
    private View mMainView;
/**
* Current state of the slideable view.
*/
public enum PanelState {
EXPANDED,
COLLAPSED,
ANCHORED,
HIDDEN,
DRAGGING
}
    private PanelState mSlideState = DEFAULT_SLIDE_STATE;
    /**
     * If the current slide state is DRAGGING, this will store the last non dragging state
     */
    private PanelState mLastNotDraggingSlideState = DEFAULT_SLIDE_STATE;
    /**
     * How far the panel is offset from its expanded position.
     * range [0, 1] where 0 = collapsed, 1 = expanded.
     */
    private float mSlideOffset;
    /**
     * How far in pixels the slideable panel may move.
     */
    private int mSlideRange;
    /**
     * An anchor point where the panel can stop during sliding
     */
    private float mAnchorPoint = 1.f; // same value as DEFAULT_ANCHOR_POINT
    /**
     * A panel view is locked into internal scrolling or another condition that
     * is preventing a drag.
     */
    private boolean mIsUnableToDrag;
    /**
     * Flag indicating that sliding feature is enabled\disabled
     */
    private boolean mIsTouchEnabled;
    // Previous and initial touch coordinates, used to compute drag deltas.
    private float mPrevMotionX;
    private float mPrevMotionY;
    private float mInitialMotionX;
    private float mInitialMotionY;
    // True while the scrollable child, rather than the panel, owns the touch stream.
    private boolean mIsScrollableViewHandlingTouch = false;
    // Copy-on-write list: listeners may be added/removed while a dispatch iterates.
    private final List<PanelSlideListener> mPanelSlideListeners = new CopyOnWriteArrayList<>();
    private View.OnClickListener mFadeOnClickListener;
    private final ViewDragHelper mDragHelper;
    /**
     * Stores whether or not the pane was expanded the last time it was slideable.
     * If expand/collapse operations are invoked this state is modified. Used by
     * instance state save/restore.
     */
    private boolean mFirstLayout = true;
    private final Rect mTmpRect = new Rect(); // scratch rect, reused to avoid allocation
    /**
     * Listener for monitoring events about sliding panes.
     */
    public interface PanelSlideListener {
        /**
         * Called when a sliding pane's position changes.
         *
         * @param panel       The child view that was moved
         * @param slideOffset The new offset of this sliding pane within its range, from 0-1
         */
        public void onPanelSlide(View panel, float slideOffset);
        /**
         * Called when a sliding panel state changes
         *
         * @param panel The child view whose state changed
         */
        public void onPanelStateChanged(View panel, PanelState previousState, PanelState newState);
    }
    /**
     * No-op stubs for {@link PanelSlideListener}. If you only want to implement a subset
     * of the listener methods you can extend this instead of implement the full interface.
     */
    public static class SimplePanelSlideListener implements PanelSlideListener {
        @Override
        public void onPanelSlide(View panel, float slideOffset) {
        }
        @Override
        public void onPanelStateChanged(View panel, PanelState previousState, PanelState newState) {
        }
    }
    /** Programmatic constructor; delegates to the AttributeSet variant. */
    public SlidingUpPanelLayout(Context context) {
        this(context, null);
    }
    /** XML-inflation constructor; delegates with default style 0. */
    public SlidingUpPanelLayout(Context context, AttributeSet attrs) {
        this(context, attrs, 0);
    }
    /**
     * Full constructor: reads the umano* styleable attributes, resolves unset
     * dimensions from dp defaults, and creates the drag helper.
     */
    public SlidingUpPanelLayout(Context context, AttributeSet attrs, int defStyle) {
        super(context, attrs, defStyle);
        // Nothing to drag in the layout-editor preview; skip all initialization.
        if (isInEditMode()) {
            mShadowDrawable = null;
            mDragHelper = null;
            return;
        }
        Interpolator scrollerInterpolator = null;
        if (attrs != null) {
            // android:gravity selects the slide direction; setGravity enforces TOP/BOTTOM.
            TypedArray defAttrs = context.obtainStyledAttributes(attrs, DEFAULT_ATTRS);
            if (defAttrs != null) {
                int gravity = defAttrs.getInt(0, Gravity.NO_GRAVITY);
                setGravity(gravity);
                defAttrs.recycle();
            }
            TypedArray ta = context.obtainStyledAttributes(attrs, R.styleable.SlidingUpPanelLayout);
            if (ta != null) {
                mPanelHeight = ta.getDimensionPixelSize(R.styleable.SlidingUpPanelLayout_umanoPanelHeight, -1);
                mShadowHeight = ta.getDimensionPixelSize(R.styleable.SlidingUpPanelLayout_umanoShadowHeight, -1);
                mParallaxOffset = ta.getDimensionPixelSize(R.styleable.SlidingUpPanelLayout_umanoParallaxOffset, -1);
                mMinFlingVelocity = ta.getInt(R.styleable.SlidingUpPanelLayout_umanoFlingVelocity, DEFAULT_MIN_FLING_VELOCITY);
                mCoveredFadeColor = ta.getColor(R.styleable.SlidingUpPanelLayout_umanoFadeColor, DEFAULT_FADE_COLOR);
                mDragViewResId = ta.getResourceId(R.styleable.SlidingUpPanelLayout_umanoDragView, -1);
                mScrollableViewResId = ta.getResourceId(R.styleable.SlidingUpPanelLayout_umanoScrollableView, -1);
                mOverlayContent = ta.getBoolean(R.styleable.SlidingUpPanelLayout_umanoOverlay, DEFAULT_OVERLAY_FLAG);
                mClipPanel = ta.getBoolean(R.styleable.SlidingUpPanelLayout_umanoClipPanel, DEFAULT_CLIP_PANEL_FLAG);
                mAnchorPoint = ta.getFloat(R.styleable.SlidingUpPanelLayout_umanoAnchorPoint, DEFAULT_ANCHOR_POINT);
                mSlideState = PanelState.values()[ta.getInt(R.styleable.SlidingUpPanelLayout_umanoInitialState, DEFAULT_SLIDE_STATE.ordinal())];
                int interpolatorResId = ta.getResourceId(R.styleable.SlidingUpPanelLayout_umanoScrollInterpolator, -1);
                if (interpolatorResId != -1) {
                    scrollerInterpolator = AnimationUtils.loadInterpolator(context, interpolatorResId);
                }
                ta.recycle();
            }
        }
        final float density = context.getResources().getDisplayMetrics().density;
        // Convert the dp defaults to pixels for any dimension not set via XML (-1).
        if (mPanelHeight == -1) {
            mPanelHeight = (int) (DEFAULT_PANEL_HEIGHT * density + 0.5f);
        }
        if (mShadowHeight == -1) {
            mShadowHeight = (int) (DEFAULT_SHADOW_HEIGHT * density + 0.5f);
        }
        if (mParallaxOffset == -1) {
            mParallaxOffset = (int) (DEFAULT_PARALLAX_OFFSET * density);
        }
        // If the shadow height is zero, don't show the shadow
        if (mShadowHeight > 0) {
            // Relies on mIsSlidingUp, already set by setGravity(...) above.
            if (mIsSlidingUp) {
                mShadowDrawable = getResources().getDrawable(R.drawable.above_shadow);
            } else {
                mShadowDrawable = getResources().getDrawable(R.drawable.below_shadow);
            }
        } else {
            mShadowDrawable = null;
        }
        // The fade and the shadow are drawn by this ViewGroup itself.
        setWillNotDraw(false);
        mDragHelper = ViewDragHelper.create(this, 0.5f, scrollerInterpolator, new DragHelperCallback());
        mDragHelper.setMinVelocity(mMinFlingVelocity * density);
        mIsTouchEnabled = true;
    }
    /**
     * Set the Drag View after the view is inflated
     */
    @Override
    protected void onFinishInflate() {
        super.onFinishInflate();
        // Resolve the optional drag/scrollable view ids now that children exist.
        if (mDragViewResId != -1) {
            setDragView(findViewById(mDragViewResId));
        }
        if (mScrollableViewResId != -1) {
            setScrollableView(findViewById(mScrollableViewResId));
        }
    }
    /**
     * Sets the slide direction: BOTTOM means the panel slides up from the
     * bottom edge, TOP means it slides down from the top edge.
     *
     * @param gravity either {@link Gravity#TOP} or {@link Gravity#BOTTOM}
     * @throws IllegalArgumentException for any other gravity value
     */
    public void setGravity(int gravity) {
        if (gravity != Gravity.TOP && gravity != Gravity.BOTTOM) {
            throw new IllegalArgumentException("gravity must be set to either top or bottom");
        }
        mIsSlidingUp = gravity == Gravity.BOTTOM;
        if (!mFirstLayout) {
            requestLayout();
        }
    }
    /**
     * Set the color used to fade the pane covered by the sliding pane out when the pane
     * will become fully covered in the expanded state.
     *
     * @param color An ARGB-packed color value
     */
    public void setCoveredFadeColor(int color) {
        mCoveredFadeColor = color;
        requestLayout();
    }
    /**
     * @return The ARGB-packed color value used to fade the fixed pane
     */
    public int getCoveredFadeColor() {
        return mCoveredFadeColor;
    }
    /**
     * Set sliding enabled flag
     *
     * @param enabled flag value
     */
    public void setTouchEnabled(boolean enabled) {
        mIsTouchEnabled = enabled;
    }
    /**
     * @return true when touch handling is enabled, a sliding child exists,
     * and the panel is not hidden.
     */
    public boolean isTouchEnabled() {
        return mIsTouchEnabled && mSlideableView != null && mSlideState != PanelState.HIDDEN;
    }
/**
* Set the collapsed panel height in pixels
*
* @param val A height in pixels
*/
public void setPanelHeight(int val) {
if (getPanelHeight() == val) {
return;
}
mPanelHeight = val;
if (!mFirstLayout) {
requestLayout();
}
if (getPanelState() == PanelState.COLLAPSED) {
smoothToBottom();
invalidate();
return;
}
}
protected void smoothToBottom() {
smoothSlideTo(0, 0);
}
    /**
     * @return The current shadow height
     */
    public int getShadowHeight() {
        return mShadowHeight;
    }
    /**
     * Set the shadow height
     *
     * @param val A height in pixels
     */
    public void setShadowHeight(int val) {
        mShadowHeight = val;
        if (!mFirstLayout) {
            invalidate();
        }
    }
    /**
     * @return The current collapsed panel height
     */
    public int getPanelHeight() {
        return mPanelHeight;
    }
    /**
     * @return The current parallax offset
     */
    public int getCurrentParallaxOffset() {
        // Clamp slide offset at zero for parallax computation;
        int offset = (int) (mParallaxOffset * Math.max(mSlideOffset, 0));
        // Sliding up shifts the main content upwards, hence the negative sign.
        return mIsSlidingUp ? -offset : offset;
    }
    /**
     * Set parallax offset for the panel
     *
     * @param val A height in pixels
     */
    public void setParallaxOffset(int val) {
        mParallaxOffset = val;
        if (!mFirstLayout) {
            requestLayout();
        }
    }
    /**
     * @return The current minimum fling velocity
     */
    public int getMinFlingVelocity() {
        return mMinFlingVelocity;
    }
    /**
     * Sets the minimum fling velocity for the panel
     *
     * @param val the new value
     */
    public void setMinFlingVelocity(int val) {
        // NOTE(review): the drag helper's min velocity is only configured in the
        // constructor; changing this value later does not update mDragHelper here.
        mMinFlingVelocity = val;
    }
    /**
     * Adds a panel slide listener
     *
     * @param listener the listener to notify of slide events
     */
    public void addPanelSlideListener(PanelSlideListener listener) {
        // NOTE(review): mPanelSlideListeners is a CopyOnWriteArrayList, which is
        // already thread-safe; the synchronized block is redundant but harmless.
        synchronized (mPanelSlideListeners) {
            mPanelSlideListeners.add(listener);
        }
    }
    /**
     * Removes a panel slide listener
     *
     * @param listener the listener to stop notifying
     */
    public void removePanelSlideListener(PanelSlideListener listener) {
        synchronized (mPanelSlideListeners) {
            mPanelSlideListeners.remove(listener);
        }
    }
    /**
     * Provides an on click for the portion of the main view that is dimmed. The listener is not
     * triggered if the panel is in a collapsed or a hidden position. If the on click listener is
     * not provided, the clicks on the dimmed area are passed through to the main layout.
     *
     * @param listener click listener for the dimmed area, or null to pass clicks through
     */
    public void setFadeOnClickListener(View.OnClickListener listener) {
        mFadeOnClickListener = listener;
    }
/**
* Set the draggable view portion. Use to null, to allow the whole panel to be draggable
*
* @param dragView A view that will be used to drag the panel.
*/
public void setDragView(View dragView) {
if (mDragView != null) {
mDragView.setOnClickListener(null);
}
mDragView = dragView;
if (mDragView != null) {
mDragView.setClickable(true);
mDragView.setFocusable(false);
mDragView.setFocusableInTouchMode(false);
mDragView.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
if (!isEnabled() || !isTouchEnabled()) return;
if (mSlideState != PanelState.EXPANDED && mSlideState != PanelState.ANCHORED) {
if (mAnchorPoint < 1.0f) {
setPanelState(PanelState.ANCHORED);
} else {
setPanelState(PanelState.EXPANDED);
}
} else {
setPanelState(PanelState.COLLAPSED);
}
}
});
;
}
}
    /**
     * Set the draggable view portion. Use to null, to allow the whole panel to be draggable
     *
     * @param dragViewResId The resource ID of the new drag view
     */
    public void setDragView(int dragViewResId) {
        mDragViewResId = dragViewResId;
        // Delegate to the View overload, which installs the click handling.
        setDragView(findViewById(dragViewResId));
    }
    /**
     * Set the scrollable child of the sliding layout. If set, scrolling will be transfered between
     * the panel and the view when necessary
     *
     * @param scrollableView The scrollable view
     */
    public void setScrollableView(View scrollableView) {
        mScrollableView = scrollableView;
    }
    /**
     * Sets the current scrollable view helper. See ScrollableViewHelper description for details.
     *
     * @param helper the helper used to read the scrollable view's scroll position
     */
    public void setScrollableViewHelper(ScrollableViewHelper helper) {
        mScrollableViewHelper = helper;
    }
    /**
     * Set an anchor point where the panel can stop during sliding
     *
     * @param anchorPoint A value between 0 and 1, determining the position of the anchor point
     *                    starting from the top of the layout.
     *                    Values outside (0, 1] are silently ignored.
     */
    public void setAnchorPoint(float anchorPoint) {
        if (anchorPoint > 0 && anchorPoint <= 1) {
            mAnchorPoint = anchorPoint;
            mFirstLayout = true; // force the next layout pass to re-derive the slide offset
            requestLayout();
        }
    }
    /**
     * Gets the currently set anchor point
     *
     * @return the currently set anchor point
     */
    public float getAnchorPoint() {
        return mAnchorPoint;
    }
    /**
     * Sets whether or not the panel overlays the content
     *
     * @param overlayed true to draw the panel on top of the main content
     */
    public void setOverlayed(boolean overlayed) {
        mOverlayContent = overlayed;
    }
    /**
     * Check if the panel is set as an overlay.
     */
    public boolean isOverlayed() {
        return mOverlayContent;
    }
    /**
     * Sets whether or not the main content is clipped to the top of the panel
     *
     * @param clip true to clip the main content at the panel's top edge
     */
    public void setClipPanel(boolean clip) {
        mClipPanel = clip;
    }
    /**
     * Check whether or not the main content is clipped to the top of the panel
     */
    public boolean isClipPanel() {
        return mClipPanel;
    }
    /** Notify all registered listeners of the current slide offset. */
    void dispatchOnPanelSlide(View panel) {
        // NOTE(review): synchronization is redundant on a CopyOnWriteArrayList.
        synchronized (mPanelSlideListeners) {
            for (PanelSlideListener l : mPanelSlideListeners) {
                l.onPanelSlide(panel, mSlideOffset);
            }
        }
    }
    /** Notify all registered listeners of a panel state transition. */
    void dispatchOnPanelStateChanged(View panel, PanelState previousState, PanelState newState) {
        synchronized (mPanelSlideListeners) {
            for (PanelSlideListener l : mPanelSlideListeners) {
                l.onPanelStateChanged(panel, previousState, newState);
            }
        }
        // Let accessibility services know the visible window content changed.
        sendAccessibilityEvent(AccessibilityEvent.TYPE_WINDOW_STATE_CHANGED);
    }
    /**
     * Makes the main (first) child INVISIBLE when it is fully covered by an
     * opaque sliding panel, so the obscured view skips drawing; makes it
     * VISIBLE otherwise.
     */
    void updateObscuredViewVisibility() {
        if (getChildCount() == 0) {
            return;
        }
        final int leftBound = getPaddingLeft();
        final int rightBound = getWidth() - getPaddingRight();
        final int topBound = getPaddingTop();
        final int bottomBound = getHeight() - getPaddingBottom();
        final int left;
        final int right;
        final int top;
        final int bottom;
        // Only an opaque panel can obscure the content beneath it.
        if (mSlideableView != null && hasOpaqueBackground(mSlideableView)) {
            left = mSlideableView.getLeft();
            right = mSlideableView.getRight();
            top = mSlideableView.getTop();
            bottom = mSlideableView.getBottom();
        } else {
            left = right = top = bottom = 0;
        }
        View child = getChildAt(0);
        final int clampedChildLeft = Math.max(leftBound, child.getLeft());
        final int clampedChildTop = Math.max(topBound, child.getTop());
        final int clampedChildRight = Math.min(rightBound, child.getRight());
        final int clampedChildBottom = Math.min(bottomBound, child.getBottom());
        final int vis;
        // INVISIBLE only when the child lies completely inside the panel's bounds.
        if (clampedChildLeft >= left && clampedChildTop >= top &&
                clampedChildRight <= right && clampedChildBottom <= bottom) {
            vis = INVISIBLE;
        } else {
            vis = VISIBLE;
        }
        child.setVisibility(vis);
    }
void setAllChildrenVisible() {
for (int i = 0, childCount = getChildCount(); i < childCount; i++) {
final View child = getChildAt(i);
if (child.getVisibility() == INVISIBLE) {
child.setVisibility(VISIBLE);
}
}
}
private static boolean hasOpaqueBackground(View v) {
final Drawable bg = v.getBackground();
return bg != null && bg.getOpacity() == PixelFormat.OPAQUE;
}
    @Override
    protected void onAttachedToWindow() {
        super.onAttachedToWindow();
        // Re-run first-layout initialization after (re)attachment.
        mFirstLayout = true;
    }
    @Override
    protected void onDetachedFromWindow() {
        super.onDetachedFromWindow();
        mFirstLayout = true;
    }
    /**
     * Measures the two children: child 0 is the main content (shrunk by the
     * collapsed panel height unless overlaying), child 1 is the sliding panel.
     * Also derives {@code mSlideRange}, the pixel distance the panel can travel.
     */
    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        final int widthMode = MeasureSpec.getMode(widthMeasureSpec);
        final int widthSize = MeasureSpec.getSize(widthMeasureSpec);
        final int heightMode = MeasureSpec.getMode(heightMeasureSpec);
        final int heightSize = MeasureSpec.getSize(heightMeasureSpec);
        // This layout cannot size itself to content; both dimensions must be constrained.
        if (widthMode != MeasureSpec.EXACTLY && widthMode != MeasureSpec.AT_MOST) {
            throw new IllegalStateException("Width must have an exact value or MATCH_PARENT");
        } else if (heightMode != MeasureSpec.EXACTLY && heightMode != MeasureSpec.AT_MOST) {
            throw new IllegalStateException("Height must have an exact value or MATCH_PARENT");
        }
        final int childCount = getChildCount();
        if (childCount != 2) {
            throw new IllegalStateException("Sliding up panel layout must have exactly 2 children!");
        }
        mMainView = getChildAt(0);
        mSlideableView = getChildAt(1);
        if (mDragView == null) {
            setDragView(mSlideableView);
        }
        // If the sliding panel is not visible, then put the whole view in the hidden state
        if (mSlideableView.getVisibility() != VISIBLE) {
            mSlideState = PanelState.HIDDEN;
        }
        int layoutHeight = heightSize - getPaddingTop() - getPaddingBottom();
        int layoutWidth = widthSize - getPaddingLeft() - getPaddingRight();
        // First pass. Measure based on child LayoutParams width/height.
        for (int i = 0; i < childCount; i++) {
            final View child = getChildAt(i);
            final LayoutParams lp = (LayoutParams) child.getLayoutParams();
            // We always measure the sliding panel in order to know it's height (needed for show panel)
            if (child.getVisibility() == GONE && i == 0) {
                continue;
            }
            int height = layoutHeight;
            int width = layoutWidth;
            if (child == mMainView) {
                // Unless the panel overlays the content, reserve the collapsed panel height.
                if (!mOverlayContent && mSlideState != PanelState.HIDDEN) {
                    height -= mPanelHeight;
                }
                width -= lp.leftMargin + lp.rightMargin;
            } else if (child == mSlideableView) {
                // The slideable view should be aware of its top margin.
                // See https://github.com/umano/AndroidSlidingUpPanel/issues/412.
                height -= lp.topMargin;
            }
            int childWidthSpec;
            if (lp.width == LayoutParams.WRAP_CONTENT) {
                childWidthSpec = MeasureSpec.makeMeasureSpec(width, MeasureSpec.AT_MOST);
            } else if (lp.width == LayoutParams.MATCH_PARENT) {
                childWidthSpec = MeasureSpec.makeMeasureSpec(width, MeasureSpec.EXACTLY);
            } else {
                childWidthSpec = MeasureSpec.makeMeasureSpec(lp.width, MeasureSpec.EXACTLY);
            }
            int childHeightSpec;
            if (lp.height == LayoutParams.WRAP_CONTENT) {
                childHeightSpec = MeasureSpec.makeMeasureSpec(height, MeasureSpec.AT_MOST);
            } else {
                // Modify the height based on the weight.
                if (lp.weight > 0 && lp.weight < 1) {
                    height = (int) (height * lp.weight);
                } else if (lp.height != LayoutParams.MATCH_PARENT) {
                    height = lp.height;
                }
                childHeightSpec = MeasureSpec.makeMeasureSpec(height, MeasureSpec.EXACTLY);
            }
            child.measure(childWidthSpec, childHeightSpec);
            // The panel can travel its full height minus the always-visible peek.
            if (child == mSlideableView) {
                mSlideRange = mSlideableView.getMeasuredHeight() - mPanelHeight;
            }
        }
        setMeasuredDimension(widthSize, heightSize);
    }
    /**
     * Positions the panel according to the current slide offset and, on the
     * first layout, derives that offset from the requested {@code mSlideState}.
     */
    @Override
    protected void onLayout(boolean changed, int l, int t, int r, int b) {
        final int paddingLeft = getPaddingLeft();
        final int paddingTop = getPaddingTop();
        final int childCount = getChildCount();
        if (mFirstLayout) {
            switch (mSlideState) {
                case EXPANDED:
                    mSlideOffset = 1.0f;
                    break;
                case ANCHORED:
                    mSlideOffset = mAnchorPoint;
                    break;
                case HIDDEN:
                    // Push the panel one full panel-height past its collapsed position.
                    int newTop = computePanelTopPosition(0.0f) + (mIsSlidingUp ? +mPanelHeight : -mPanelHeight);
                    mSlideOffset = computeSlideOffset(newTop);
                    break;
                default:
                    mSlideOffset = 0.f;
                    break;
            }
        }
        for (int i = 0; i < childCount; i++) {
            final View child = getChildAt(i);
            final LayoutParams lp = (LayoutParams) child.getLayoutParams();
            // Always layout the sliding view on the first layout
            if (child.getVisibility() == GONE && (i == 0 || mFirstLayout)) {
                continue;
            }
            final int childHeight = child.getMeasuredHeight();
            int childTop = paddingTop;
            if (child == mSlideableView) {
                childTop = computePanelTopPosition(mSlideOffset);
            }
            if (!mIsSlidingUp) {
                // When sliding down, the main content sits below the panel.
                if (child == mMainView && !mOverlayContent) {
                    childTop = computePanelTopPosition(mSlideOffset) + mSlideableView.getMeasuredHeight();
                }
            }
            final int childBottom = childTop + childHeight;
            final int childLeft = paddingLeft + lp.leftMargin;
            final int childRight = childLeft + child.getMeasuredWidth();
            child.layout(childLeft, childTop, childRight, childBottom);
        }
        if (mFirstLayout) {
            updateObscuredViewVisibility();
        }
        applyParallaxForCurrentSlideOffset();
        mFirstLayout = false;
    }
@Override
protected void onSizeChanged(int w, int h, int oldw, int oldh) {
super.onSizeChanged(w, h, oldw, oldh);
// Recalculate sliding panes and their details
if (h != oldh) {
mFirstLayout = true;
}
}
    /**
     * Decides whether this layout should steal the touch stream from its
     * children to drag the panel, and handles taps on the dimmed area.
     */
    @Override
    public boolean onInterceptTouchEvent(MotionEvent ev) {
        // If the scrollable view is handling touch, never intercept
        if (mIsScrollableViewHandlingTouch || !isTouchEnabled()) {
            mDragHelper.abort();
            return false;
        }
        final int action = MotionEventCompat.getActionMasked(ev);
        final float x = ev.getX();
        final float y = ev.getY();
        final float adx = Math.abs(x - mInitialMotionX);
        final float ady = Math.abs(y - mInitialMotionY);
        final int dragSlop = mDragHelper.getTouchSlop();
        switch (action) {
            case MotionEvent.ACTION_DOWN: {
                mIsUnableToDrag = false;
                mInitialMotionX = x;
                mInitialMotionY = y;
                // Only gestures that start on the drag view may drag the panel.
                if (!isViewUnder(mDragView, (int) x, (int) y)) {
                    mDragHelper.cancel();
                    mIsUnableToDrag = true;
                    return false;
                }
                break;
            }
            case MotionEvent.ACTION_MOVE: {
                // Give up once the move passes the slop but is predominantly horizontal.
                if (ady > dragSlop && adx > ady) {
                    mDragHelper.cancel();
                    mIsUnableToDrag = true;
                    return false;
                }
                break;
            }
            case MotionEvent.ACTION_CANCEL:
            case MotionEvent.ACTION_UP:
                // If the dragView is still dragging when we get here, we need to call processTouchEvent
                // so that the view is settled
                // Added to make scrollable views work (tokudu)
                if (mDragHelper.isDragging()) {
                    mDragHelper.processTouchEvent(ev);
                    return true;
                }
                // Check if this was a click on the faded part of the screen, and fire off the listener if there is one.
                if (ady <= dragSlop
                        && adx <= dragSlop
                        && mSlideOffset > 0 && !isViewUnder(mSlideableView, (int) mInitialMotionX, (int) mInitialMotionY) && mFadeOnClickListener != null) {
                    playSoundEffect(android.view.SoundEffectConstants.CLICK);
                    mFadeOnClickListener.onClick(this);
                    return true;
                }
                break;
        }
        return mDragHelper.shouldInterceptTouchEvent(ev);
    }
    @Override
    public boolean onTouchEvent(MotionEvent ev) {
        if (!isEnabled() || !isTouchEnabled()) {
            return super.onTouchEvent(ev);
        }
        try {
            mDragHelper.processTouchEvent(ev);
            return true;
        } catch (Exception ex) {
            // Ignore the pointer out of range exception
            // NOTE(review): this broad catch also hides unrelated failures; consider
            // narrowing it to the specific exception the drag helper throws.
            return false;
        }
    }
    /**
     * Routes vertical move events between the panel drag logic and the
     * scrollable child so one continuous gesture can first scroll the content
     * and then move the panel (and vice versa).
     */
    @Override
    public boolean dispatchTouchEvent(MotionEvent ev) {
        final int action = MotionEventCompat.getActionMasked(ev);
        if (!isEnabled() || !isTouchEnabled() || (mIsUnableToDrag && action != MotionEvent.ACTION_DOWN)) {
            mDragHelper.abort();
            return super.dispatchTouchEvent(ev);
        }
        final float x = ev.getX();
        final float y = ev.getY();
        if (action == MotionEvent.ACTION_DOWN) {
            mIsScrollableViewHandlingTouch = false;
            mPrevMotionX = x;
            mPrevMotionY = y;
        } else if (action == MotionEvent.ACTION_MOVE) {
            float dx = x - mPrevMotionX;
            float dy = y - mPrevMotionY;
            mPrevMotionX = x;
            mPrevMotionY = y;
            if (Math.abs(dx) > Math.abs(dy)) {
                // Scrolling horizontally, so ignore
                return super.dispatchTouchEvent(ev);
            }
            // If the scroll view isn't under the touch, pass the
            // event along to the dragView.
            if (!isViewUnder(mScrollableView, (int) mInitialMotionX, (int) mInitialMotionY)) {
                return super.dispatchTouchEvent(ev);
            }
            // Which direction (up or down) is the drag moving?
            if (dy * (mIsSlidingUp ? 1 : -1) > 0) { // Collapsing
                // Is the child less than fully scrolled?
                // Then let the child handle it.
                if (mScrollableViewHelper.getScrollableViewScrollPosition(mScrollableView, mIsSlidingUp) > 0) {
                    mIsScrollableViewHandlingTouch = true;
                    return super.dispatchTouchEvent(ev);
                }
                // Was the child handling the touch previously?
                // Then we need to rejigger things so that the
                // drag panel gets a proper down event.
                if (mIsScrollableViewHandlingTouch) {
                    // Send an 'UP' event to the child.
                    MotionEvent up = MotionEvent.obtain(ev);
                    up.setAction(MotionEvent.ACTION_CANCEL);
                    super.dispatchTouchEvent(up);
                    up.recycle();
                    // Send a 'DOWN' event to the panel. (We'll cheat
                    // and hijack this one)
                    ev.setAction(MotionEvent.ACTION_DOWN);
                }
                mIsScrollableViewHandlingTouch = false;
                return this.onTouchEvent(ev);
            } else if (dy * (mIsSlidingUp ? 1 : -1) < 0) { // Expanding
                // Is the panel less than fully expanded?
                // Then we'll handle the drag here.
                if (mSlideOffset < 1.0f) {
                    mIsScrollableViewHandlingTouch = false;
                    return this.onTouchEvent(ev);
                }
                // Was the panel handling the touch previously?
                // Then we need to rejigger things so that the
                // child gets a proper down event.
                if (!mIsScrollableViewHandlingTouch && mDragHelper.isDragging()) {
                    mDragHelper.cancel();
                    ev.setAction(MotionEvent.ACTION_DOWN);
                }
                mIsScrollableViewHandlingTouch = true;
                return super.dispatchTouchEvent(ev);
            }
        } else if (action == MotionEvent.ACTION_UP) {
            // If the scrollable view was handling the touch and we receive an up
            // we want to clear any previous dragging state so we don't intercept a touch stream accidentally
            if (mIsScrollableViewHandlingTouch) {
                mDragHelper.setDragState(ViewDragHelper.STATE_IDLE);
            }
        }
        // In all other cases, just let the default behavior take over.
        return super.dispatchTouchEvent(ev);
    }
/**
 * Hit-test: returns true when the parent-relative point (x, y) falls
 * inside the given view's bounds, comparing both in screen coordinates.
 */
private boolean isViewUnder(View view, int x, int y) {
    if (view == null) return false;
    int[] targetOnScreen = new int[2];
    view.getLocationOnScreen(targetOnScreen);
    int[] selfOnScreen = new int[2];
    this.getLocationOnScreen(selfOnScreen);
    // Translate the parent-relative touch point into screen coordinates.
    final int touchScreenX = selfOnScreen[0] + x;
    final int touchScreenY = selfOnScreen[1] + y;
    final boolean insideHorizontally =
            touchScreenX >= targetOnScreen[0] && touchScreenX < targetOnScreen[0] + view.getWidth();
    final boolean insideVertically =
            touchScreenY >= targetOnScreen[1] && touchScreenY < targetOnScreen[1] + view.getHeight();
    return insideHorizontally && insideVertically;
}
/*
 * Computes the top position of the panel for the given slide offset.
 */
private int computePanelTopPosition(float slideOffset) {
    final int slideableHeight = (mSlideableView == null) ? 0 : mSlideableView.getMeasuredHeight();
    final int pixelOffset = (int) (slideOffset * mSlideRange);
    if (mIsSlidingUp) {
        // Panel rests at the bottom edge; larger offsets move its top upwards.
        return getMeasuredHeight() - getPaddingBottom() - mPanelHeight - pixelOffset;
    }
    // Panel rests at the top edge; larger offsets move its top downwards.
    return getPaddingTop() - slideableHeight + mPanelHeight + pixelOffset;
}
/*
 * Computes the slide offset corresponding to the given panel top position.
 */
private float computeSlideOffset(int topPosition) {
    // Top position the panel would have when fully collapsed (offset 0).
    final int collapsedTop = computePanelTopPosition(0);
    // Distance travelled from the collapsed position, in the expand direction.
    final int travelled = mIsSlidingUp
            ? collapsedTop - topPosition
            : topPosition - collapsedTop;
    return (float) travelled / mSlideRange;
}
/**
 * Returns the current state of the panel as an enum.
 * Note: while the user is actively dragging this may be {@link PanelState#DRAGGING}.
 *
 * @return the current panel state
 */
public PanelState getPanelState() {
    return mSlideState;
}
/**
 * Change panel state to the given state, animating when the view is already laid out.
 *
 * @param state - new panel state; must not be null or {@link PanelState#DRAGGING}
 * @throws IllegalArgumentException if {@code state} is null or DRAGGING
 */
public void setPanelState(PanelState state) {
    // Abort any running animation, to allow state change
    if(mDragHelper.getViewDragState() == ViewDragHelper.STATE_SETTLING){
        Log.d(TAG, "View is settling. Aborting animation.");
        mDragHelper.abort();
    }
    if (state == null || state == PanelState.DRAGGING) {
        throw new IllegalArgumentException("Panel state cannot be null or DRAGGING.");
    }
    // Ignore the request while disabled, while a drag is in progress,
    // when already in the target state, or when there is nothing to slide.
    if (!isEnabled()
            || (!mFirstLayout && mSlideableView == null)
            || state == mSlideState
            || mSlideState == PanelState.DRAGGING) return;
    if (mFirstLayout) {
        // Before the first layout there is nothing to animate; just record the state.
        setPanelStateInternal(state);
    } else {
        if (mSlideState == PanelState.HIDDEN) {
            // A hidden panel was made INVISIBLE (see DragHelperCallback); show it again first.
            mSlideableView.setVisibility(View.VISIBLE);
            requestLayout();
        }
        switch (state) {
            case ANCHORED:
                smoothSlideTo(mAnchorPoint, 0);
                break;
            case COLLAPSED:
                smoothSlideTo(0, 0);
                break;
            case EXPANDED:
                smoothSlideTo(1.0f, 0);
                break;
            case HIDDEN:
                // Slide one full panel-height beyond the collapsed position, off screen.
                int newTop = computePanelTopPosition(0.0f) + (mIsSlidingUp ? +mPanelHeight : -mPanelHeight);
                smoothSlideTo(computeSlideOffset(newTop), 0);
                break;
        }
    }
}
/**
 * Records the new panel state and notifies listeners; no-op when unchanged.
 */
private void setPanelStateInternal(PanelState state) {
    // Skip redundant transitions to avoid spurious listener callbacks.
    if (state == mSlideState) return;
    final PanelState previousState = mSlideState;
    mSlideState = state;
    dispatchOnPanelStateChanged(this, previousState, state);
}
/**
 * Update the parallax based on the current slide offset.
 */
@SuppressLint("NewApi")
private void applyParallaxForCurrentSlideOffset() {
    if (mParallaxOffset <= 0) {
        return; // Parallax disabled.
    }
    ViewCompat.setTranslationY(mMainView, getCurrentParallaxOffset());
}
/**
 * Called whenever the panel's top edge moves during a drag or settle.
 * Updates the slide offset, parallax, listeners, and (when not overlaying)
 * the main content's height.
 */
private void onPanelDragged(int newTop) {
    // Remember the last stable state so it can be saved/restored while dragging.
    if (mSlideState != PanelState.DRAGGING) {
        mLastNotDraggingSlideState = mSlideState;
    }
    setPanelStateInternal(PanelState.DRAGGING);
    // Recompute the slide offset based on the new top position
    mSlideOffset = computeSlideOffset(newTop);
    applyParallaxForCurrentSlideOffset();
    // Dispatch the slide event
    dispatchOnPanelSlide(mSlideableView);
    // If the slide offset is negative, and overlay is not on, we need to increase the
    // height of the main content
    LayoutParams lp = (LayoutParams) mMainView.getLayoutParams();
    int defaultHeight = getHeight() - getPaddingBottom() - getPaddingTop() - mPanelHeight;
    if (mSlideOffset <= 0 && !mOverlayContent) {
        // expand the main view
        lp.height = mIsSlidingUp ? (newTop - getPaddingBottom()) : (getHeight() - getPaddingBottom() - mSlideableView.getMeasuredHeight() - newTop);
        if (lp.height == defaultHeight) {
            // Exactly the default size: use MATCH_PARENT so future layouts track the parent.
            lp.height = LayoutParams.MATCH_PARENT;
        }
        mMainView.requestLayout();
    } else if (lp.height != LayoutParams.MATCH_PARENT && !mOverlayContent) {
        // Panel moved back above the collapsed position: restore the main view to full size.
        lp.height = LayoutParams.MATCH_PARENT;
        mMainView.requestLayout();
    }
}
/**
 * Draws a child, clipping the main content against the panel and applying the
 * covered-fade color over it proportionally to the slide offset.
 */
@Override
protected boolean drawChild(Canvas canvas, View child, long drawingTime) {
    boolean result;
    // Save only the clip state; restored below via restoreToCount().
    final int save = canvas.save(Canvas.CLIP_SAVE_FLAG);
    if (mSlideableView != null && mSlideableView != child) { // if main view
        // Clip against the slider; no sense drawing what will immediately be covered,
        // Unless the panel is set to overlay content
        canvas.getClipBounds(mTmpRect);
        if (!mOverlayContent) {
            if (mIsSlidingUp) {
                mTmpRect.bottom = Math.min(mTmpRect.bottom, mSlideableView.getTop());
            } else {
                mTmpRect.top = Math.max(mTmpRect.top, mSlideableView.getBottom());
            }
        }
        if (mClipPanel) {
            canvas.clipRect(mTmpRect);
        }
        result = super.drawChild(canvas, child, drawingTime);
        // Fade the covered (main) view: scale the fade color's alpha by the slide offset.
        if (mCoveredFadeColor != 0 && mSlideOffset > 0) {
            final int baseAlpha = (mCoveredFadeColor & 0xff000000) >>> 24;
            final int imag = (int) (baseAlpha * mSlideOffset);
            final int color = imag << 24 | (mCoveredFadeColor & 0xffffff);
            mCoveredFadePaint.setColor(color);
            canvas.drawRect(mTmpRect, mCoveredFadePaint);
        }
    } else {
        result = super.drawChild(canvas, child, drawingTime);
    }
    canvas.restoreToCount(save);
    return result;
}
/**
 * Smoothly animate mDraggingPane to the target X position within its range.
 *
 * @param slideOffset position to animate to
 * @param velocity    initial velocity in case of fling, or 0.
 */
boolean smoothSlideTo(float slideOffset, int velocity) {
    if (mSlideableView == null || !isEnabled()) {
        return false; // Nothing to animate.
    }
    final int targetTop = computePanelTopPosition(slideOffset);
    final boolean settling =
            mDragHelper.smoothSlideViewTo(mSlideableView, mSlideableView.getLeft(), targetTop);
    if (settling) {
        setAllChildrenVisible();
        ViewCompat.postInvalidateOnAnimation(this);
    }
    return settling;
}
/**
 * Continues any in-flight settle animation driven by the drag helper.
 */
@Override
public void computeScroll() {
    if (mDragHelper == null || !mDragHelper.continueSettling(true)) {
        return;
    }
    if (!isEnabled()) {
        // The layout was disabled mid-animation; stop immediately.
        mDragHelper.abort();
        return;
    }
    ViewCompat.postInvalidateOnAnimation(this);
}
@Override
public void draw(Canvas c) {
    super.draw(c);
    // Paint the drop shadow along the panel's leading edge, if one is configured.
    if (mShadowDrawable == null || mSlideableView == null) {
        return;
    }
    final int shadowLeft = mSlideableView.getLeft();
    final int shadowRight = mSlideableView.getRight();
    final int shadowTop;
    final int shadowBottom;
    if (mIsSlidingUp) {
        // Shadow sits just above the panel's top edge.
        shadowTop = mSlideableView.getTop() - mShadowHeight;
        shadowBottom = mSlideableView.getTop();
    } else {
        // Shadow sits just below the panel's bottom edge.
        shadowTop = mSlideableView.getBottom();
        shadowBottom = mSlideableView.getBottom() + mShadowHeight;
    }
    mShadowDrawable.setBounds(shadowLeft, shadowTop, shadowRight, shadowBottom);
    mShadowDrawable.draw(c);
}
/**
 * Tests scrollability within child views of v given a delta of dx.
 *
 * @param v View to test for horizontal scrollability
 * @param checkV Whether the view v passed should itself be checked for scrollability (true),
 *               or just its children (false).
 * @param dx Delta scrolled in pixels
 * @param x X coordinate of the active touch point, relative to v
 * @param y Y coordinate of the active touch point, relative to v
 * @return true if child views of v can be scrolled by delta of dx.
 */
protected boolean canScroll(View v, boolean checkV, int dx, int x, int y) {
    if (v instanceof ViewGroup) {
        final ViewGroup group = (ViewGroup) v;
        final int scrollX = v.getScrollX();
        final int scrollY = v.getScrollY();
        final int count = group.getChildCount();
        // Count backwards - let topmost views consume scroll distance first.
        for (int i = count - 1; i >= 0; i--) {
            final View child = group.getChildAt(i);
            // Recurse only into the child actually under the touch point,
            // translating the point into the child's coordinate space.
            if (x + scrollX >= child.getLeft() && x + scrollX < child.getRight() &&
                    y + scrollY >= child.getTop() && y + scrollY < child.getBottom() &&
                    canScroll(child, true, dx, x + scrollX - child.getLeft(),
                            y + scrollY - child.getTop())) {
                return true;
            }
        }
    }
    // Finally ask v itself (when allowed) whether it can scroll in the opposite direction.
    return checkV && ViewCompat.canScrollHorizontally(v, -dx);
}
/** Default layout params for children added without explicit params: MATCH_PARENT both ways. */
@Override
protected ViewGroup.LayoutParams generateDefaultLayoutParams() {
    return new LayoutParams();
}
/** Converts foreign layout params into this layout's type, preserving margins when present. */
@Override
protected ViewGroup.LayoutParams generateLayoutParams(ViewGroup.LayoutParams p) {
    if (p instanceof MarginLayoutParams) {
        return new LayoutParams((MarginLayoutParams) p);
    }
    return new LayoutParams(p);
}
/** Accepts only this layout's own LayoutParams subtype (and whatever the superclass requires). */
@Override
protected boolean checkLayoutParams(ViewGroup.LayoutParams p) {
    return p instanceof LayoutParams && super.checkLayoutParams(p);
}
/** Inflates layout params from XML attributes. */
@Override
public ViewGroup.LayoutParams generateLayoutParams(AttributeSet attrs) {
    return new LayoutParams(getContext(), attrs);
}
/**
 * Saves the superclass state plus the current panel state. The transient
 * DRAGGING state is never persisted; the last stable state is saved instead.
 */
@Override
public Parcelable onSaveInstanceState() {
    final Bundle outState = new Bundle();
    outState.putParcelable("superState", super.onSaveInstanceState());
    final PanelState stateToSave =
            (mSlideState == PanelState.DRAGGING) ? mLastNotDraggingSlideState : mSlideState;
    outState.putSerializable(SLIDING_STATE, stateToSave);
    return outState;
}
/**
 * Restores the panel state saved by {@link #onSaveInstanceState()},
 * falling back to the default state when none was stored.
 */
@Override
public void onRestoreInstanceState(Parcelable state) {
    if (state instanceof Bundle) {
        final Bundle bundle = (Bundle) state;
        final PanelState restored = (PanelState) bundle.getSerializable(SLIDING_STATE);
        mSlideState = (restored != null) ? restored : DEFAULT_SLIDE_STATE;
        state = bundle.getParcelable("superState");
    }
    super.onRestoreInstanceState(state);
}
/**
 * Bridges ViewDragHelper events to the panel: captures only the slideable view,
 * translates drag positions into panel state, and decides where a released
 * panel should settle.
 */
private class DragHelperCallback extends ViewDragHelper.Callback {
    /** Only the slideable panel may be dragged, and only while dragging is allowed. */
    @Override
    public boolean tryCaptureView(View child, int pointerId) {
        return !mIsUnableToDrag && child == mSlideableView;
    }
    /**
     * When the helper becomes idle, derive the final panel state from the
     * panel's resting top position.
     */
    @Override
    public void onViewDragStateChanged(int state) {
        if (mDragHelper != null && mDragHelper.getViewDragState() == ViewDragHelper.STATE_IDLE) {
            mSlideOffset = computeSlideOffset(mSlideableView.getTop());
            applyParallaxForCurrentSlideOffset();
            if (mSlideOffset == 1) {
                updateObscuredViewVisibility();
                setPanelStateInternal(PanelState.EXPANDED);
            } else if (mSlideOffset == 0) {
                setPanelStateInternal(PanelState.COLLAPSED);
            } else if (mSlideOffset < 0) {
                // A negative offset means the panel settled past the collapsed
                // position, i.e. fully off screen.
                setPanelStateInternal(PanelState.HIDDEN);
                mSlideableView.setVisibility(View.INVISIBLE);
            } else {
                updateObscuredViewVisibility();
                setPanelStateInternal(PanelState.ANCHORED);
            }
        }
    }
    @Override
    public void onViewCaptured(View capturedChild, int activePointerId) {
        setAllChildrenVisible();
    }
    @Override
    public void onViewPositionChanged(View changedView, int left, int top, int dx, int dy) {
        onPanelDragged(top);
        invalidate();
    }
    /**
     * Chooses the settle target on release: a fling settles at the anchor or the
     * nearest extreme in the fling direction; zero velocity snaps to the nearest
     * of expanded / anchor / collapsed.
     */
    @Override
    public void onViewReleased(View releasedChild, float xvel, float yvel) {
        int target = 0;
        // direction is always positive if we are sliding in the expanded direction
        float direction = mIsSlidingUp ? -yvel : yvel;
        if (direction > 0 && mSlideOffset <= mAnchorPoint) {
            // swipe up -> expand and stop at anchor point
            target = computePanelTopPosition(mAnchorPoint);
        } else if (direction > 0 && mSlideOffset > mAnchorPoint) {
            // swipe up past anchor -> expand
            target = computePanelTopPosition(1.0f);
        } else if (direction < 0 && mSlideOffset >= mAnchorPoint) {
            // swipe down -> collapse and stop at anchor point
            target = computePanelTopPosition(mAnchorPoint);
        } else if (direction < 0 && mSlideOffset < mAnchorPoint) {
            // swipe down past anchor -> collapse
            target = computePanelTopPosition(0.0f);
        } else if (mSlideOffset >= (1.f + mAnchorPoint) / 2) {
            // zero velocity, and far enough from anchor point => expand to the top
            target = computePanelTopPosition(1.0f);
        } else if (mSlideOffset >= mAnchorPoint / 2) {
            // zero velocity, and close enough to anchor point => go to anchor
            target = computePanelTopPosition(mAnchorPoint);
        } else {
            // settle at the bottom
            target = computePanelTopPosition(0.0f);
        }
        if (mDragHelper != null) {
            mDragHelper.settleCapturedViewAt(releasedChild.getLeft(), target);
        }
        invalidate();
    }
    @Override
    public int getViewVerticalDragRange(View child) {
        return mSlideRange;
    }
    /** Clamps the panel's top between its fully-expanded and fully-collapsed positions. */
    @Override
    public int clampViewPositionVertical(View child, int top, int dy) {
        final int collapsedTop = computePanelTopPosition(0.f);
        final int expandedTop = computePanelTopPosition(1.0f);
        if (mIsSlidingUp) {
            // Sliding up: expandedTop < collapsedTop.
            return Math.min(Math.max(top, expandedTop), collapsedTop);
        } else {
            // Sliding down: collapsedTop < expandedTop.
            return Math.min(Math.max(top, collapsedTop), expandedTop);
        }
    }
}
/**
 * Layout params for children of the sliding panel layout; adds an optional
 * {@code layout_weight} attribute on top of margin layout params.
 */
public static class LayoutParams extends ViewGroup.MarginLayoutParams {
    // Styleable attributes parsed in the (Context, AttributeSet) constructor.
    private static final int[] ATTRS = new int[]{
            android.R.attr.layout_weight
    };
    // Relative weight of this child; 0 means "no weight".
    public float weight = 0;
    public LayoutParams() {
        super(MATCH_PARENT, MATCH_PARENT);
    }
    public LayoutParams(int width, int height) {
        super(width, height);
    }
    public LayoutParams(int width, int height, float weight) {
        super(width, height);
        this.weight = weight;
    }
    public LayoutParams(android.view.ViewGroup.LayoutParams source) {
        super(source);
    }
    public LayoutParams(MarginLayoutParams source) {
        super(source);
    }
    public LayoutParams(LayoutParams source) {
        super(source);
    }
    public LayoutParams(Context c, AttributeSet attrs) {
        super(c, attrs);
        final TypedArray ta = c.obtainStyledAttributes(attrs, ATTRS);
        if (ta != null) {
            this.weight = ta.getFloat(0, 0);
            ta.recycle();
        }
    }
}
}
| umano/AndroidSlidingUpPanel | library/src/main/java/com/sothree/slidinguppanel/SlidingUpPanelLayout.java |
1,462 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package groovy.lang;
import groovy.security.GroovyCodeSourcePermission;
import groovy.util.CharsetToolkit;
import java.io.*;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.security.AccessController;
import java.security.CodeSource;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.security.cert.Certificate;
import org.codehaus.groovy.runtime.IOGroovyMethods;
import org.codehaus.groovy.runtime.ResourceGroovyMethods;
/**
 * CodeSource wrapper class that allows specific security policies to be associated with a class
 * compiled from groovy source.
 *
 * @author Steve Goetze
 * @author Guillaume Laforge
 * @author Merlyn Albery-Speyer
 */
public class GroovyCodeSource {
    /**
     * The codeSource to be given the generated class. This can be used by policy file
     * grants to administer security.
     */
    private CodeSource codeSource;
    /**
     * The name given to the generated class
     */
    private String name;
    /**
     * The groovy source to be compiled and turned into a class
     */
    private String scriptText;
    /**
     * The certificates used to sign the items from the codesource
     */
    Certificate[] certs;
    // Whether the class compiled from this source may be cached by the class loader.
    private boolean cachable;
    private File file;
    private URL url;

    public GroovyCodeSource(String script, String name, String codeBase) {
        this.name = name;
        this.scriptText = script;
        this.codeSource = createCodeSource(codeBase);
        this.cachable = true;
    }

    /**
     * Construct a GroovyCodeSource for an inputStream of groovyCode that has an
     * unknown provenance -- meaning it didn't come from a File or a URL (e.g. a String).
     * The supplied codeBase will be used to construct a File URL that should match up
     * with a java Policy entry that determines the grants to be associated with the
     * class that will be built from the InputStream.
     * <p>
     * The permission groovy.security.GroovyCodeSourcePermission will be used to determine if the given codeBase
     * may be specified. That is, the current Policy set must have a GroovyCodeSourcePermission that implies
     * the codeBase, or an exception will be thrown. This is to prevent callers from hijacking
     * existing codeBase policy entries unless explicitly authorized by the user.
     */
    public GroovyCodeSource(Reader reader, String name, String codeBase) {
        this.name = name;
        this.codeSource = createCodeSource(codeBase);
        try {
            this.scriptText = IOGroovyMethods.getText(reader);
        } catch (IOException e) {
            throw new RuntimeException("Impossible to read the text content from that reader, for script: " + name + " with codeBase: " + codeBase, e);
        }
    }

    /**
     * Construct a GroovyCodeSource for a groovy source file, reading its content
     * with the given encoding (or the platform default when {@code encoding} is null).
     *
     * @param infile   the groovy source file
     * @param encoding the charset name used to read the file, may be null
     * @throws IOException if the file cannot be canonicalized or read
     */
    public GroovyCodeSource(final File infile, final String encoding) throws IOException {
        // avoid files which confuse us like ones with .. in path
        final File file = new File(infile.getCanonicalPath());
        if (!file.exists()) {
            throw new FileNotFoundException(file.toString() + " (" + file.getAbsolutePath() + ")");
        }
        if (file.isDirectory()) {
            throw new IllegalArgumentException(file.toString() + " (" + file.getAbsolutePath() + ") is a directory not a Groovy source file.");
        }
        // canRead() may throw SecurityException, which is deliberately propagated to the caller.
        if (!file.canRead()) {
            throw new RuntimeException(file.toString() + " can not be read. Check the read permission of the file \"" + file.toString() + "\" (" + file.getAbsolutePath() + ").");
        }
        this.file = file;
        this.cachable = true;
        //The calls below require access to user.dir - allow here since getName() and getCodeSource() are
        //package private and used only by the GroovyClassLoader.
        try {
            Object[] info = AccessController.doPrivileged(new PrivilegedExceptionAction<Object[]>() {
                public Object[] run() throws IOException {
                    // retrieve the content of the file using the provided encoding
                    if (encoding != null) {
                        scriptText = ResourceGroovyMethods.getText(infile, encoding);
                    } else {
                        scriptText = ResourceGroovyMethods.getText(infile);
                    }
                    Object[] info = new Object[2];
                    URL url = file.toURI().toURL();
                    info[0] = url.toExternalForm();
                    //toURI().toURL() will encode, but toURL() will not.
                    info[1] = new CodeSource(url, (Certificate[]) null);
                    return info;
                }
            });
            this.name = (String) info[0];
            this.codeSource = (CodeSource) info[1];
        } catch (PrivilegedActionException pae) {
            Throwable cause = pae.getCause();
            // Unwrap the IOException thrown inside the privileged action, if any.
            if (cause instanceof IOException) {
                throw (IOException) cause;
            }
            throw new RuntimeException("Could not construct CodeSource for file: " + file, cause);
        }
    }

    /**
     * @param infile the file to create a GroovyCodeSource for.
     * @throws IOException if an issue arises opening and reading the file.
     */
    public GroovyCodeSource(final File infile) throws IOException {
        this(infile, CharsetToolkit.getDefaultSystemCharset().name());
    }

    public GroovyCodeSource(URI uri) throws IOException {
        this(uri.toURL());
    }

    public GroovyCodeSource(URL url) throws IOException {
        if (url == null) {
            throw new RuntimeException("Could not construct a GroovyCodeSource from a null URL");
        }
        this.url = url;
        // TODO: GROOVY-6561: GroovyMain got the name this way: script.substring(script.lastIndexOf("/") + 1)
        this.name = url.toExternalForm();
        this.codeSource = new CodeSource(url, (Certificate[]) null);
        try {
            String contentEncoding = url.openConnection().getContentEncoding();
            if (contentEncoding != null) {
                this.scriptText = ResourceGroovyMethods.getText(url, contentEncoding);
            } else {
                this.scriptText = ResourceGroovyMethods.getText(url); // falls-back on default encoding
            }
        } catch (IOException e) {
            throw new RuntimeException("Impossible to read the text content from " + name, e);
        }
    }

    CodeSource getCodeSource() {
        return codeSource;
    }

    public String getScriptText() {
        return scriptText;
    }

    public String getName() {
        return name;
    }

    public File getFile() {
        return file;
    }

    public URL getURL() {
        return url;
    }

    public void setCachable(boolean b) {
        cachable = b;
    }

    public boolean isCachable() {
        return cachable;
    }

    /**
     * Builds a file-URL CodeSource for the given codeBase, first checking the
     * GroovyCodeSourcePermission so callers cannot hijack existing policy entries.
     */
    private static CodeSource createCodeSource(final String codeBase) {
        SecurityManager sm = System.getSecurityManager();
        if (sm != null) {
            sm.checkPermission(new GroovyCodeSourcePermission(codeBase));
        }
        try {
            return new CodeSource(new URL("file", "", codeBase), (Certificate[]) null);
        }
        catch (MalformedURLException e) {
            throw new RuntimeException("A CodeSource file URL cannot be constructed from the supplied codeBase: " + codeBase);
        }
    }
}
| groovy/groovy-core | src/main/groovy/lang/GroovyCodeSource.java |
1,463 | /*
* Original work Copyright 2019 The Moby Project.
* Modified work Copyright (c) 2008-2024, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.instance.impl;
import java.util.Arrays;
import java.util.Collections;
/**
* Java port of the Moby Project random name generator (https://github.com/moby/moby).
*/
public final class MobyNames {
public static final String MOBY_NAMING_PREFIX = "hazelcast.internal.member.naming.moby.prefix";
private static final String NAME_FORMAT = "%s_%s";
private static final String[] LEFT = {"admiring", "adoring", "affectionate", "agitated", "amazing", "angry", "awesome",
"blissful", "boring", "brave", "charming", "clever", "cool", "compassionate",
"competent", "condescending", "confident", "cranky", "crazy", "dazzling", "determined",
"distracted", "dreamy", "eager", "ecstatic", "elastic", "elated", "elegant", "eloquent",
"epic", "fervent", "festive", "flamboyant", "focused", "friendly", "frosty", "gallant",
"gifted", "goofy", "gracious", "happy", "hardcore", "heuristic", "hopeful", "hungry",
"infallible", "inspiring", "jolly", "jovial", "keen", "kind", "laughing", "loving",
"lucid", "magical", "mystifying", "modest", "musing", "naughty", "nervous", "nifty",
"nostalgic", "objective", "optimistic", "peaceful", "pedantic", "pensive", "practical",
"priceless", "quirky", "quizzical", "recursing", "relaxed", "reverent", "romantic",
"sad", "serene", "sharp", "silly", "sleepy", "stoic", "stupefied", "suspicious",
"sweet", "tender", "thirsty", "trusting", "unruffled", "upbeat", "vibrant", "vigilant",
"vigorous", "wizardly", "wonderful", "xenodochial", "youthful", "zealous", "zen"};
// Docker, starting from 0.7.x, generates names from notable scientists and hackers.
// Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa.
private static final String[] RIGHT = {
// Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy.
// https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB
"albattani",
// Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of
// the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen
"allen",
// June Almeida - Scottish virologist who took the first pictures of the rubella virus
// - https://en.wikipedia.org/wiki/June_Almeida
"almeida",
// Kathleen Antonelli, American computer programmer and one of the six original programmers of the ENIAC
// - https://en.wikipedia.org/wiki/Kathleen_Antonelli
"antonelli",
// Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman
// to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University.
// https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi
"agnesi",
// Archimedes was a physicist, engineer and mathematician who invented too many things to list them here.
// https://en.wikipedia.org/wiki/Archimedes
"archimedes",
// Maria Ardinghelli - Italian translator, mathematician and physicist
// - https://en.wikipedia.org/wiki/Maria_Ardinghelli
"ardinghelli",
// Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata
"aryabhata",
// Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the
// US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin
"austin",
// Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage.
"babbage",
// Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis.
// https://en.wikipedia.org/wiki/Stefan_Banach
"banach",
// Buckaroo Banzai and his mentor Dr. Hikita perfected the "oscillation overthruster", a device that allows one
// to pass through solid matter.
// - https://en.wikipedia.org/wiki/The_Adventures_of_Buckaroo_Banzai_Across_the_8th_Dimension
"banzai",
// John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen
"bardeen",
// Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer.
// https://en.wikipedia.org/wiki/Jean_Bartik
"bartik",
// Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi
"bassi",
// Hugh Beaver, British engineer, founder of the Guinness Book of World Records
// https://en.wikipedia.org/wiki/Hugh_Beaver
"beaver",
// Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with
// inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell
"bell",
// Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar.
// https://en.wikipedia.org/wiki/Karl_Benz
"benz",
// Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata
// Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"
// - https://en.wikipedia.org/wiki/Homi_J._Bhabha
"bhabha",
// Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz
// by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus
"bhaskara",
// Sue Black - British computer scientist and campaigner. She has been instrumental in saving
// Bletchley Park, the site of World War II codebreaking -
// https://en.wikipedia.org/wiki/Sue_Black_(computer_scientist)
"black",
// Elizabeth Helen Blackburn - Australian-American Nobel laureate; best known for co-discovering telomerase.
// https://en.wikipedia.org/wiki/Elizabeth_Blackburn
"blackburn",
// Elizabeth Blackwell - American doctor and first American woman to receive a medical degree
// - https://en.wikipedia.org/wiki/Elizabeth_Blackwell
"blackwell",
// Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr.
"bohr",
// Kathleen Booth, she's credited with writing the first assembly language.
// https://en.wikipedia.org/wiki/Kathleen_Booth
"booth",
// Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT).
// https://en.wikipedia.org/wiki/Anita_Borg
"borg",
// Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the
// Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose
"bose",
// Katherine Louise Bouman is an imaging scientist and Assistant Professor of Computer Science at the
// California Institute of Technology. She researches computational methods for imaging, and developed an
// algorithm that made possible the picture first visualization of a black hole using the
// Event Horizon Telescope. - https://en.wikipedia.org/wiki/Katie_Bouman
"bouman",
// Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she
// earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville
"boyd",
// Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero
// - https://en.wikipedia.org/wiki/Brahmagupta#Zero
"brahmagupta",
// Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain
"brattain",
// Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff)
"brown",
// Dame Susan Jocelyn Bell Burnell - discoverer of pulsars while a graduate student, "one of the most significant
// scientific achievements of the 20th Century". - https://en.wikipedia.org/wiki/Jocelyn_Bell_Burnell
"burnell",
// Linda Brown Buck - American biologist and Nobel laureate best known for her genetic and molecular analyses of
// the mechanisms of smell. https://en.wikipedia.org/wiki/Linda_B._Buck
"buck",
// Annie Jump Cannon - pioneering female astronomer who classified hundreds of thousands of stars and created the
// system we use to understand stars today. https://en.wikipedia.org/wiki/Annie_Jump_Cannon
"cannon",
// Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are
// credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson
"carson",
// Dame Mary Lucy Cartwright - British mathematician who was one of the first to study what is now known as chaos
// theory. Also known for Cartwright's theorem which finds applications in signal processing.
// https://en.wikipedia.org/wiki/Mary_Cartwright
"cartwright",
// George Washington Carver - American agricultural scientist and inventor. He was the most prominent black
// scientist of the early 20th century. https://en.wikipedia.org/wiki/George_Washington_Carver
"carver",
// Vinton Gray Cerf - American Internet pioneer, recognised as one of "the fathers of the Internet". With
// Robert Elliot Kahn, he designed TCP and IP, the primary data communication protocols of the Internet and
// other computer networks. https://en.wikipedia.org/wiki/Vint_Cerf
"cerf",
// Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution
// in structures of the stars. He won the Nobel Prize in Physics
// - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar
"chandrasekhar",
// Sergey Alexeyevich Chaplygin was a Russian and Soviet physicist, mathematician, and mechanical engineer.
// He is known for mathematical formulas such as Chaplygin's equation and for a hypothetical substance
// in cosmology called Chaplygin gas, named after him.
// https://en.wikipedia.org/wiki/Sergey_Chaplygin
"chaplygin",
// Émilie du Châtelet - French natural philosopher, mathematician, physicist, and author during the early 1730s,
// known for her translation of and commentary on Isaac Newton's book Principia containing basic laws of physics.
// https://en.wikipedia.org/wiki/%C3%89milie_du_Ch%C3%A2telet
"chatelet",
// Asima Chatterjee was an Indian organic chemist noted for her research on vinca alkaloids, development of drugs
// for treatment of epilepsy and malaria - https://en.wikipedia.org/wiki/Asima_Chatterjee
"chatterjee",
// Pafnuty Chebyshev - Russian mathematician. He is known for his works on probability, statistics, mechanics,
// analytical geometry and number theory https://en.wikipedia.org/wiki/Pafnuty_Chebyshev
"chebyshev",
// Clifford Christopher Cocks - British mathematician and cryptographer employed by the GCHQ. Invented in 1973
// an equivalent of what is now known as the RSA public-key cryptosystem (Rivest, Shamir and Adleman first
// publicly described RSA in 1978). https://en.wikipedia.org/wiki/Clifford_Cocks
"clifford",
// Bram Cohen - American computer programmer and author of the BitTorrent peer-to-peer protocol.
// https://en.wikipedia.org/wiki/Bram_Cohen
"cohen",
// David Lee Chaum - American computer scientist and cryptographer. Known for his seminal contributions in the
// field of anonymous communication. https://en.wikipedia.org/wiki/David_Chaum
"chaum",
// Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained
// top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke
"clarke",
// Jane Colden - American botanist widely considered the first female American botanist
// - https://en.wikipedia.org/wiki/Jane_Colden
"colden",
// Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel
// Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born
// in Prague. https://en.wikipedia.org/wiki/Gerty_Cori
"cori",
// Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of
// computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray
"cray",
// This entry reflects a husband and wife team who worked together:
// Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure.
// https://en.wikipedia.org/wiki/Joan_Curran
// Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse.
// https://en.wikipedia.org/wiki/Samuel_Curran
"curran",
// Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie.
"curie",
// Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin.
"darwin",
// Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci.
"davinci",
// A. K. (Alexander Keewatin) Dewdney, Canadian mathematician, computer scientist, author and filmmaker. Contributor
// to Scientific American's "Computer Recreations" from 1984 to 1991. Author of Core War (program), The Planiverse,
// The Armchair Universe, The Magic Machine, The New Turing Omnibus, and more.
// https://en.wikipedia.org/wiki/Alexander_Dewdney
"dewdney",
// Satish Dhawan - Indian mathematician and aerospace engineer, known for leading the successful and indigenous
// development of the Indian space programme. https://en.wikipedia.org/wiki/Satish_Dhawan
"dhawan",
// Bailey Whitfield Diffie - American cryptographer and one of the pioneers of public-key cryptography.
// https://en.wikipedia.org/wiki/Whitfield_Diffie
"diffie",
// Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist.
// https://en.wikipedia.org/wiki/Edsger_W._Dijkstra.
"dijkstra",
// Paul Adrien Maurice Dirac - English theoretical physicist who made fundamental contributions to the early
// development of both quantum mechanics and quantum electrodynamics. https://en.wikipedia.org/wiki/Paul_Dirac
"dirac",
// Agnes Meyer Driscoll - American cryptanalyst during World Wars I and II who successfully cryptanalysed a
// number of Japanese ciphers. She was also the co-developer of one of the cipher machines of the US Navy,
// the CM. https://en.wikipedia.org/wiki/Agnes_Meyer_Driscoll
"driscoll",
// Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving
// as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky
"dubinsky",
// Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage
// and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley
"easley",
// Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison
"edison",
// Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein
"einstein",
// Alexandra Asanovna Elbakyan is a Kazakhstani graduate student, computer programmer, internet pirate in
// hiding, and the creator of the site Sci-Hub. Nature has listed her in 2016 in the top ten people
// that mattered in science, and Ars Technica has compared her to Aaron Swartz. -
// https://en.wikipedia.org/wiki/Alexandra_Elbakyan
"elbakyan",
// Taher A. ElGamal - Egyptian cryptographer best known for the ElGamal discrete log cryptosystem and the
// ElGamal digital signature scheme. https://en.wikipedia.org/wiki/Taher_Elgamal
"elgamal",
// Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine
// - https://en.wikipedia.org/wiki/Gertrude_Elion
"elion",
// James Henry Ellis - British engineer and cryptographer employed by the GCHQ. Best known for conceiving,
// for the first time, the idea of public-key cryptography. https://en.wikipedia.org/wiki/James_H._Ellis
"ellis",
// Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart
"engelbart",
// Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid
"euclid",
// Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler
"euler",
// Michael Faraday - British scientist who contributed to the study of electromagnetism and electrochemistry.
// https://en.wikipedia.org/wiki/Michael_Faraday
"faraday",
// Horst Feistel - German-born American cryptographer who was one of the earliest non-government researchers to
// study the design and theory of block ciphers. Co-developer of DES and Lucifer. Feistel networks, a symmetric
// structure used in the construction of block ciphers are named after him.
// https://en.wikipedia.org/wiki/Horst_Feistel
"feistel",
// Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat
"fermat",
// Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi.
"fermi",
// Richard Feynman was a key contributor to quantum mechanics and particle physics.
// https://en.wikipedia.org/wiki/Richard_Feynman
"feynman",
// Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod.
"franklin",
// Yuri Alekseyevich Gagarin - Soviet pilot and cosmonaut, best known as the first human to journey into
// outer space. https://en.wikipedia.org/wiki/Yuri_Gagarin
"gagarin",
// Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific
// truth. https://en.wikipedia.org/wiki/Galileo_Galilei
"galileo",
// Évariste Galois - French mathematician whose work laid the foundations of Galois theory and group theory, two
// major branches of abstract algebra, and the subfield of Galois connections, all while still in his late teens.
// https://en.wikipedia.org/wiki/%C3%89variste_Galois
"galois",
// Kadambini Ganguly - Indian physician, known for being the first South Asian female physician, trained in western
// medicine, to graduate in South Asia. https://en.wikipedia.org/wiki/Kadambini_Ganguly
"ganguly",
// William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer,
// and inventor. https://en.wikipedia.org/wiki/Bill_Gates
"gates",
// Johann Carl Friedrich Gauss - German mathematician who made significant contributions to many fields, including
// number theory, algebra, statistics, analysis, differential geometry, geodesy, geophysics, mechanics,
// electrostatics, magnetic fields, astronomy, matrix theory, and optics. https://en.wikipedia
// .org/wiki/Carl_Friedrich_Gauss
"gauss",
// Marie-Sophie Germain - French mathematician, physicist and philosopher. Known for her work on elasticity theory,
// number theory and philosophy. https://en.wikipedia.org/wiki/Sophie_Germain
"germain",
// Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia
// .org/wiki/Adele_Goldberg_(computer_scientist)
"goldberg",
// Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital
// computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine
"goldstine",
// Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography.
// Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser
"goldwasser",
// James Golick, all around gangster.
"golick",
// Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's
// foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall
"goodall",
// Stephen Jay Gould was an American paleontologist, evolutionary biologist, and historian of science.
// He is most famous for the theory of punctuated equilibrium -
// https://en.wikipedia.org/wiki/Stephen_Jay_Gould
"gould",
// Carolyn Widney Greider - American molecular biologist and joint winner of the 2009 Nobel Prize for Physiology or
// Medicine for the discovery of telomerase. https://en.wikipedia.org/wiki/Carol_W._Greider
"greider",
// Alexander Grothendieck - German-born French mathematician who became a leading figure in the creation of modern
// algebraic geometry. https://en.wikipedia.org/wiki/Alexander_Grothendieck
"grothendieck",
// Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia
// .org/wiki/Lois_Haibt
"haibt",
// Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which
// developed on-board flight software for the Apollo space program. https://en.wikipedia
// .org/wiki/Margaret_Hamilton_(scientist)
"hamilton",
// Caroline Harriet Haslett - English electrical engineer, electricity industry administrator and champion of
// women's rights. Co-author of British Standard 1363 that specifies AC power plugs and sockets used across the
// United Kingdom (which is widely considered as one of the safest designs). https://en.wikipedia
// .org/wiki/Caroline_Haslett
"haslett",
// Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics.
// https://en.wikipedia.org/wiki/Stephen_Hawking
"hawking",
// Martin Edward Hellman - American cryptologist, best known for his invention of public-key cryptography in
// co-operation with Whitfield Diffie and Ralph Merkle. https://en.wikipedia.org/wiki/Martin_Hellman
"hellman",
// Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg
"heisenberg",
// Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics.
// https://en.wikipedia.org/wiki/Grete_Hermann
"hermann",
// Caroline Lucretia Herschel - German astronomer and discoverer of several comets. https://en.wikipedia
// .org/wiki/Caroline_Herschel
"herschel",
// Heinrich Rudolf Hertz - German physicist who first conclusively proved the existence of the electromagnetic
// waves. https://en.wikipedia.org/wiki/Heinrich_Hertz
"hertz",
// Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and
// recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia
// .org/wiki/Jaroslav_Heyrovsk%C3%BD
"heyrovsky",
// Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was
// awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin
"hodgkin",
// Douglas R. Hofstadter is an American professor of cognitive science and author of the Pulitzer Prize and
// American Book Award-winning work Goedel, Escher, Bach: An Eternal Golden Braid in 1979. A mind-bending work
// which coined Hofstadter's Law: "It always takes longer than you expect, even when you take into account
// Hofstadter's Law." https://en.wikipedia.org/wiki/Douglas_Hofstadter
"hofstadter",
// Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method
// . https://en.wikipedia.org/wiki/Erna_Schneider_Hoover
"hoover",
// Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing
// the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper
"hopper",
// Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of
// semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en
// .wikipedia.org/wiki/Frances_Hugle
"hugle",
// Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics
// - https://en.wikipedia.org/wiki/Hypatia
"hypatia",
// Teruko Ishizaka - Japanese scientist and immunologist who co-discovered the antibody class Immunoglobulin E.
// https://en.wikipedia.org/wiki/Teruko_Ishizaka
"ishizaka",
// Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's
// engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer)
"jackson",
// Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal
// printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil
"jang",
// Mae Carol Jemison - is an American engineer, physician, and former NASA astronaut. She became the first
// black woman to travel in space when she served as a mission specialist aboard the
// Space Shuttle Endeavour - https://en.wikipedia.org/wiki/Mae_Jemison
"jemison",
// Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en
// .wikipedia.org/wiki/Jean_Bartik
"jennings",
// Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of
// Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen
"jepsen",
// Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https://en
// .wikipedia.org/wiki/Katherine_Johnson
"johnson",
// Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie
// and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie
"joliot",
// Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines
// today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones
"jones",
// A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of
// ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam
"kalam",
// Sergey Petrovich Kapitsa (14 February 1928 – 14 August 2012) was a Russian physicist and demographer.
// He was best known as host of the popular and long-running Russian scientific TV show, Evident, but Incredible.
// His father was the Nobel laureate Soviet-era physicist Pyotr Kapitsa, and his brother was the geographer
// and Antarctic explorer Andrey Kapitsa. - https://en.wikipedia.org/wiki/Sergey_Kapitsa
"kapitsa",
// Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s,
// and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare
"kare",
// Mstislav Keldysh - a Soviet scientist in the field of mathematics and mechanics, academician of the USSR Academy
// of Sciences (1946), President of the USSR Academy of Sciences (1961–1975), three times Hero of Socialist Labor
// (1956, 1961, 1971), fellow of the Royal Society of Edinburgh (1968). https://en.wikipedia.org/wiki/Mstislav_Keldysh
"keldysh",
// Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer
// Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller
"keller",
// Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia
// .org/wiki/Johannes_Kepler
"kepler",
// Omar Khayyam - Persian mathematician, astronomer and poet. Known for his work on the classification and solution
// of cubic equations, for his contribution to the understanding of Euclid's fifth postulate and for computing the
// length of a year very accurately. https://en.wikipedia.org/wiki/Omar_Khayyam
"khayyam",
// Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en
// .wikipedia.org/wiki/Har_Gobind_Khorana
"khorana",
// Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia
// .org/wiki/Jack_Kilby
"kilby",
// Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia
// .org/wiki/Maria_Margarethe_Kirch
"kirch",
// Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX
// typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth
"knuth",
// Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis,
// differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya
"kowalevski",
// Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia
// .org/wiki/Marie-Jeanne_de_Lalande
"lalande",
// Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and
// Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr
"lamarr",
// Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed
// systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport
"lamport",
// Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en
// .wikipedia.org/wiki/Mary_Leakey
"leakey",
// Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and
// the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt
"leavitt",
// Esther Miriam Zimmer Lederberg - American microbiologist and a pioneer of bacterial genetics. https://en
// .wikipedia.org/wiki/Esther_Lederberg
"lederberg",
// Inge Lehmann - Danish seismologist and geophysicist. Known for discovering in 1936 that the Earth has a solid
// inner core inside a molten outer core. https://en.wikipedia.org/wiki/Inge_Lehmann
"lehmann",
// Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for
// routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia
// .org/wiki/Daniel_Lewin
"lewin",
// Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en
// .wikipedia.org/wiki/Ruth_Teitelbaum
"lichterman",
// Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize
// in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov
"liskov",
// Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull)
"lovelace",
// Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia
// .org/wiki/Auguste_and_Louis_Lumi%C3%A8re
"lumiere",
// Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities -
// https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician)
"mahavira",
// Lynn Margulis (b. Lynn Petra Alexander) - an American evolutionary theorist and biologist, science author,
// educator, and popularizer, and was the primary modern proponent for the significance of symbiosis in evolution.
// - https://en.wikipedia.org/wiki/Lynn_Margulis
"margulis",
// Yukihiro Matsumoto - Japanese computer scientist and software programmer best known as the chief designer of the
// Ruby programming language. https://en.wikipedia.org/wiki/Yukihiro_Matsumoto
"matsumoto",
// James Clerk Maxwell - Scottish physicist, best known for his formulation of electromagnetic theory. https://en
// .wikipedia.org/wiki/James_Clerk_Maxwell
"maxwell",
// Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model
// of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer
"mayer",
// John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist)
"mccarthy",
// Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for
// discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock
"mcclintock",
// Anne Laura Dorinthea McLaren - British developmental biologist whose work helped lead to human in-vitro
// fertilisation. https://en.wikipedia.org/wiki/Anne_McLaren
"mclaren",
// Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean
"mclean",
// Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en
// .wikipedia.org/wiki/Kathleen_Antonelli
"mcnulty",
// Gregor Johann Mendel - Czech scientist and founder of genetics. https://en.wikipedia.org/wiki/Gregor_Mendel
"mendel",
// Dmitri Mendeleev - a chemist and inventor. He formulated the Periodic Law, created a farsighted version of the
// periodic table of elements, and used it to correct the properties of some already discovered elements and also
// to predict the properties of eight elements yet to be discovered. https://en.wikipedia.org/wiki/Dmitri_Mendeleev
"mendeleev",
// Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element
// meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner
"meitner",
// Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en
// .wikipedia.org/wiki/Carla_Meninsky
"meninsky",
// Ralph C. Merkle - American computer scientist, known for devising Merkle's puzzles - one of the very first
// schemes for public-key cryptography. Also, inventor of Merkle trees and co-inventor of the Merkle-Damgård
// construction for building collision-resistant cryptographic hash functions and the Merkle-Hellman knapsack
// cryptosystem. https://en.wikipedia.org/wiki/Ralph_Merkle
"merkle",
// Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en
// .wikipedia.org/wiki/Johanna_Mestorf
"mestorf",
// Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969
// . https://en.wikipedia.org/wiki/Marvin_Minsky
"minsky",
// Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia
// .org/wiki/Maryam_Mirzakhani
"mirzakhani",
// Gordon Earle Moore - American engineer, Silicon Valley founding father, author of Moore's law. https://en
// .wikipedia.org/wiki/Gordon_Moore
"moore",
// Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and
// was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse
"morse",
// Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock
"murdock",
// May-Britt Moser - Nobel prize winner neuroscientist who contributed to the discovery of grid cells in the brain.
// https://en.wikipedia.org/wiki/May-Britt_Moser
"moser",
// John Napier of Merchiston - Scottish landowner known as an astronomer, mathematician and physicist. Best known
// for his discovery of logarithms. https://en.wikipedia.org/wiki/John_Napier
"napier",
// John Forbes Nash, Jr. - American mathematician who made fundamental contributions to game theory, differential
// geometry, and the study of partial differential equations. https://en.wikipedia.org/wiki/John_Forbes_Nash_Jr.
"nash",
// John von Neumann - today's computer architectures are based on the von Neumann architecture. https://en.wikipedia
// .org/wiki/Von_Neumann_architecture
"neumann",
// Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton
"newton",
// Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal
// Statistical Society and a pioneer in statistical graphics https://en.wikipedia
// .org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform
"nightingale",
// Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) -
// https://en.wikipedia.org/wiki/Alfred_Nobel
"nobel",
// Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia
// .org/wiki/Emmy_Noether
"noether",
// Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www
// .businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1
"northcutt",
// Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia
// .org/wiki/Robert_Noyce
"noyce",
// Panini - Ancient Indian linguist and grammarian from 4th century BCE who worked on the world's first formal
// system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems
"panini",
// Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9
"pare",
// Blaise Pascal, French mathematician, physicist, and inventor - https://en.wikipedia.org/wiki/Blaise_Pascal
"pascal",
// Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur.
"pasteur",
// Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an
// explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en
// .wikipedia.org/wiki/Cecilia_Payne-Gaposchkin
"payne",
// Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree
// protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman
"perlman",
// Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language.
// https://en.wikipedia.org/wiki/Rob_Pike
"pike",
// Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia
// .org/wiki/Henri_Poincar%C3%A9
"poincare",
// Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the
// causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden.
// https://en.wikipedia.org/wiki/Laura_Poitras
"poitras",
// Tat’yana Avenirovna Proskuriakova (January 23 [O.S. January 10] 1909 – August 30, 1985) was a
// Russian-American Mayanist scholar and archaeologist who contributed significantly to the deciphering
// of Maya hieroglyphs, the writing system of the pre-Columbian Maya civilization of Mesoamerica
// . https://en.wikipedia.org/wiki/Tatiana_Proskouriakoff
"proskuriakova",
// Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer,
// astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy
"ptolemy",
// C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en
// .wikipedia.org/wiki/C._V._Raman
"raman",
// Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical
// analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia
// .org/wiki/Srinivasa_Ramanujan
"ramanujan",
// Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the
// youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride
"ride",
// Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the
// discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini)
"montalcini",
// Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie
"ritchie",
// Ida Rhodes - American pioneer in computer programming, designed the first computer used for Social Security.
// https://en.wikipedia.org/wiki/Ida_Rhodes
"rhodes",
// Julia Hall Bowman Robinson - American mathematician renowned for her contributions to the fields of
// computability theory and computational complexity theory. https://en.wikipedia.org/wiki/Julia_Robinson
"robinson",
// Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the
// discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen
"roentgen",
// Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the
// understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin
"rosalind",
// Vera Rubin - American astronomer who pioneered work on galaxy rotation rates. https://en.wikipedia
// .org/wiki/Vera_Rubin
"rubin",
// Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe
// chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha
"saha",
// Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of
// mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet
"sammet",
// Mildred Sanderson - American mathematician best known for Sanderson's theorem concerning modular invariants.
// https://en.wikipedia.org/wiki/Mildred_Sanderson
"sanderson",
// Satoshi Nakamoto is the name used by the unknown person or group of people who developed bitcoin,
// authored the bitcoin white paper, and created and deployed bitcoin's original reference implementation.
// https://en.wikipedia.org/wiki/Satoshi_Nakamoto
"satoshi",
// Adi Shamir - Israeli cryptographer whose numerous inventions and contributions to cryptography include
// the Feige-Fiat-Shamir identification scheme, the Rivest Shamir Adleman (RSA) public-key cryptosystem,
// the Shamir's secret sharing scheme, the breaking of the Merkle-Hellman cryptosystem, the TWINKLE and
// TWIRL factoring devices and the discovery of differential cryptanalysis (with Eli Biham).
// https://en.wikipedia.org/wiki/Adi_Shamir
"shamir",
// Claude Shannon - The father of information theory and founder of digital circuit design theory.
// (https://en.wikipedia.org/wiki/Claude_Shannon)
"shannon",
// Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer.
// https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer)
"shaw",
// Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home.
// https://en.wikipedia.org/wiki/Steve_Shirley
"shirley",
// William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley
"shockley",
// Lina Solomonovna Stern (or Shtern; 26 August 1878 – 7 March 1968) was a Soviet biochemist, physiologist and
// humanist whose medical discoveries saved thousands of lives at the fronts of World
// War II. She is best known for her pioneering work on blood–brain barrier, which she described as
// hemato-encephalic barrier in 1921. https://en.wikipedia.org/wiki/Lina_Stern
"shtern",
// Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was
// fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia
// .org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi
"sinoussi",
// Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en
// .wikipedia.org/wiki/Betty_Holberton
"snyder",
// Cynthia Solomon - Pioneer in the fields of artificial intelligence, computer science and educational computing.
// Known for creation of Logo, an educational programming language. https://en.wikipedia.org/wiki/Cynthia_Solomon
"solomon",
// Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en
// .wikipedia.org/wiki/Frances_Spence
"spence",
// Ivan Edward Sutherland - American computer scientist and Internet pioneer, widely regarded as the father of
// computer graphics. https://en.wikipedia.org/wiki/Ivan_Sutherland
"sutherland",
// Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software
// Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the
// ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software
// . https://en.wikiquote.org/wiki/Richard_Stallman
"stallman",
// Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner
// of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker
"stonebraker",
// Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech.
// https://en.wikipedia.org/wiki/Janese_Swanson
"swanson",
// Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we
// know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz
"swartz",
// Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory.
// https://en.wikipedia.org/wiki/Bertha_Swirles
"swirles",
// Helen Brooke Taussig - American cardiologist and founder of the field of paediatric cardiology. https://en
// .wikipedia.org/wiki/Helen_B._Taussig
"taussig",
// Valentina Tereshkova is a Russian engineer, cosmonaut and politician. She was the first woman to fly to space in
// 1963. In 2013, at the age of 76, she offered to go on a one-way mission to Mars. https://en.wikipedia
// .org/wiki/Valentina_Tereshkova
"tereshkova",
// Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en
// .wikipedia.org/wiki/Nikola_Tesla
"tesla",
// Marie Tharp - American geologist and oceanic cartographer who co-created the first scientific map of the
// Atlantic Ocean floor. Her work led to the acceptance of the theories of plate tectonics and continental drift.
// https://en.wikipedia.org/wiki/Marie_Tharp
"tharp",
// Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson
"thompson",
// Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds
"torvalds",
// Youyou Tu - Chinese pharmaceutical chemist and educator known for discovering artemisinin and
// dihydroartemisinin, used to treat malaria, which has saved millions of lives. Joint winner of the 2015 Nobel
// Prize in Physiology or Medicine. https://en.wikipedia.org/wiki/Tu_Youyou
"tu",
// Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing.
"turing",
// Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en
// .wikipedia.org/wiki/Var%C4%81hamihira#Contributions
"varahamihira",
// Dorothy Vaughan was a NASA mathematician and computer programmer on the SCOUT launch vehicle program that put
// America's first satellites into space - https://en.wikipedia.org/wiki/Dorothy_Vaughan
"vaughan",
// Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's
// highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in
// India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya
"visvesvaraya",
// Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research
// on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard
"volhard",
// Cédric Villani - French mathematician, won Fields Medal, Fermat Prize and Poincaré Price for his work in
// differential geometry and statistical mechanics. https://en.wikipedia.org/wiki/C%C3%A9dric_Villani
"villani",
// Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC -
// https://en.wikipedia.org/wiki/Marlyn_Meltzer
"wescoff",
// Sylvia B. Wilbur - British computer scientist who helped develop the ARPANET, was one of the first to
// exchange email in the UK and a leading researcher in computer-supported collaborative work.
// https://en.wikipedia.org/wiki/Sylvia_Wilbur
"wilbur",
// Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem -
// https://en.wikipedia.org/wiki/Andrew_Wiles
"wiles",
// Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the
// King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams
"williams",
// Malcolm John Williamson - British mathematician and cryptographer employed by the GCHQ. Developed in 1974 what
// is now known as Diffie-Hellman key exchange (Diffie and Hellman first published the scheme in 1976). https://en
// .wikipedia.org/wiki/Malcolm_J._Williamson
"williamson",
// Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en
// .wikipedia.org/wiki/Sophie_Wilson
"wilson",
// Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing
"wing",
// Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak
"wozniak",
// The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful
// airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en
// .wikipedia.org/wiki/Wright_brothers
"wright",
// Chien-Shiung Wu - Chinese-American experimental physicist who made significant contributions to nuclear physics.
// https://en.wikipedia.org/wiki/Chien-Shiung_Wu
"wu",
// Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977
// Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia
// .org/wiki/Rosalyn_Sussman_Yalow
"yalow",
// Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the
// sciences. https://en.wikipedia.org/wiki/Ada_Yonath
"yonath",
// Nikolay Yegorovich Zhukovsky (January 17 1847 – March 17, 1921) was a Russian scientist, mathematician
// and engineer, and a founding father of modern aero- and hydrodynamics. Whereas contemporary scientists
// scoffed at the idea of human flight, Zhukovsky was the first to undertake the study of
// airflow. He is often called the Father of Russian Aviation. https://en.wikipedia
// .org/wiki/Nikolay_Yegorovich_Zhukovsky
"zhukovsky"};
static {
Collections.shuffle(Arrays.asList(LEFT));
Collections.shuffle(Arrays.asList(RIGHT));
}
// Utility class holding only static members; the private constructor
// prevents instantiation.
private MobyNames() {
}
/**
 * Returns a name from the list of names formatted as "prefix_adjective_surname" (or "adjective_surname" if prefix is null),
 * for example 'foo_focused_turing' (or 'focused_turing' if prefix is null). The list is randomized on class
 * initialization, but the answers from repeated calls with the same number
 * are stable.
 *
 * @param number index into the sequence of names; negative values are mapped into the valid range
 * @return a Moby name
 */
public static String getRandomName(int number) {
    // Math.floorMod keeps the index non-negative even for negative inputs;
    // the '%' operator would yield a negative remainder here and cause an
    // ArrayIndexOutOfBoundsException on the array accesses below.
    int combinationIdx = Math.floorMod(number, LEFT.length * RIGHT.length);
    // Enumerate combinations adjective-first: consecutive numbers cycle
    // through all adjectives before advancing to the next surname.
    int rightIdx = combinationIdx / LEFT.length;
    int leftIdx = combinationIdx % LEFT.length;
    String name = String.format(NAME_FORMAT, LEFT[leftIdx], RIGHT[rightIdx]);
    // Optional prefix taken from a system property, e.g. to namespace
    // generated names per deployment.
    String prefix = System.getProperty(MOBY_NAMING_PREFIX);
    if (prefix != null) {
        name = prefix + "_" + name;
    }
    return name;
}
}
| hazelcast/hazelcast | hazelcast/src/main/java/com/hazelcast/instance/impl/MobyNames.java |
1,464 | // Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
package org.bytedeco.dnnl;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
import static org.bytedeco.javacpp.presets.javacpp.*;
import org.bytedeco.opencl.*;
import static org.bytedeco.opencl.global.OpenCL.*;
import static org.bytedeco.dnnl.global.dnnl.*;
/** \} dnnl_api_primitives_common
<p>
* \} dnnl_api_primitives
<p>
* \addtogroup dnnl_api_memory Memory
*
* A container that describes and stores data. Memory objects can contain
* data of various types and formats. There are two levels of abstraction:
*
* 1. **Memory descriptor** -- engine-agnostic logical description of data
* (number of dimensions, dimension sizes, and data type), and,
* optionally, the information about the physical format of data in
* memory. If this information is not known yet, a memory descriptor can
* be created with #dnnl::memory::format_tag::any. This allows
* compute-intensive primitives to choose the best format for
* computation. The user is responsible for reordering the data into the
* chosen format when formats do not match.
*
* A memory descriptor can be initialized either by specifying dimensions
* and a memory format tag or strides for each of them, or by
* manipulating the dnnl_memory_desc_t structure directly.
*
* \warning
* The latter approach requires understanding how the physical data
* representation is mapped to the structure and is discouraged. This
* topic is discussed in \ref dev_guide_understanding_memory_formats.
*
* The user can query the amount of memory required by a memory
* descriptor using the #dnnl::memory::desc::get_size() function. The
* size of data in general cannot be computed as the product of
* dimensions multiplied by the size of the data type. So users are
* required to use this function for better code portability.
*
* Two memory descriptors can be compared using the equality and
* inequality operators. The comparison is especially useful when
* checking whether it is necessary to reorder data from the user's data
* format to a primitive's format.
*
* 2. **Memory object** -- an engine-specific object that handles the memory
* buffer and its description (a memory descriptor). For the CPU engine or
* with USM, the memory buffer handle is simply a pointer to \c void. The
* memory buffer can be queried using #dnnl::memory::get_data_handle() and
* set using #dnnl::memory::set_data_handle(). The underlying SYCL buffer,
* when used, can be queried using #dnnl::sycl_interop::get_buffer and set
* using #dnnl::sycl_interop::set_buffer. A memory object can also be
* queried for the underlying memory descriptor and for its engine using
* #dnnl::memory::get_desc() and dnnl::memory::get_engine().
*
* Along with ordinary memory descriptors with all dimensions being positive,
* the library supports *zero-volume* memory descriptors with one or more
* dimensions set to zero. This is used to support the NumPy\* convention.
* If a zero-volume memory is passed to a primitive, the primitive typically
* does not perform any computations with this memory. For example:
*
* - A concatenation primitive would ignore all memory object with zeroes in
* the concat dimension / axis.
*
* - A forward convolution with a source memory object with zero in the
* minibatch dimension would always produce a destination memory object
* with a zero in the minibatch dimension and perform no computations.
*
* - However, a forward convolution with a zero in one of the weights
* dimensions is ill-defined and is considered to be an error by the
* library because there is no clear definition of what the output values
* should be.
*
* Memory buffer of a zero-volume memory is never accessed.
*
* \{
<p>
* Memory object.
*
* A memory object encapsulates a handle to a memory buffer allocated on a
* specific engine, tensor dimensions, data type, and memory format, which is
* the way tensor indices map to offsets in linear memory space. Memory
* objects are passed to primitives during execution. */
@Namespace("dnnl") @Properties(inherit = org.bytedeco.dnnl.presets.dnnl.class)
public class memory extends dnnl_memory_handle {
// NOTE(review): this file is generated by JavaCPP ("DO NOT EDIT" header);
// the comments below are for reader orientation only. Each Java constructor
// delegates to a private native allocate() overload.
static { Loader.load(); } // loads the native bindings before any native call
/** Constructs a memory object via the native no-argument allocator. */
public memory() { super((Pointer)null); allocate(); }
private native void allocate();
/** Copy constructor: allocates a native copy of {@code arg0}. */
public memory(@Const @ByRef memory arg0) { super((Pointer)null); allocate(arg0); }
private native void allocate(@Const @ByRef memory arg0);
///
/** Wraps an existing C API {@code dnnl_memory_t} handle.
 * NOTE(review): {@code weak} presumably controls whether this wrapper takes
 * ownership of the handle — confirm against dnnl_memory_handle. */
public memory(dnnl_memory t, @Cast("bool") boolean weak/*=false*/) { super((Pointer)null); allocate(t, weak); }
private native void allocate(dnnl_memory t, @Cast("bool") boolean weak/*=false*/);
/** Single-argument overload; the native side applies the {@code weak=false}
 * default shown above. */
public memory(dnnl_memory t) { super((Pointer)null); allocate(t); }
private native void allocate(dnnl_memory t);
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public memory(Pointer p) { super(p); }
/** Native array allocator. Access with {@link Pointer#position(long)}. */
public memory(long size) { super((Pointer)null); allocateArray(size); }
private native void allocateArray(long size);
/** Repositions this pointer within the natively allocated array. */
@Override public memory position(long position) {
    return (memory)super.position(position);
}
/** Returns a new {@code memory} view offset by {@code i} elements. */
@Override public memory getPointer(long i) {
    return new memory((Pointer)this).offsetAddress(i);
}
/** Integer type for representing dimension sizes and indices. */
/** Vector of dimensions. Implementations are free to force a limit on the
* vector's length. */
///
/** Helper function that validates that an {@code std::vector} of dimensions can
* be safely converted to the C API array ::dnnl_dims_t. Throws if
* validation fails.
*
* @param v Vector of dimensions.
* @param min_size Minimum expected size of the vector. */
/** Data type specification. */
public enum data_type {
    /** Undefined data type (used for empty memory descriptors). */
    undef(dnnl_data_type_undef),
    /** [OFP8 standard 8-bit floating-point](https://www.opencompute.org/documents/ocp-8-bit-floating-point-specification-ofp8-revision-1-0-2023-06-20-pdf)
     * with a 5-bit exponent and a 2-bit mantissa. */
    f8_e5m2(dnnl_f8_e5m2),
    /** [OFP8 standard 8-bit floating-point](https://www.opencompute.org/documents/ocp-8-bit-floating-point-specification-ofp8-revision-1-0-2023-06-20-pdf)
     * with a 4-bit exponent and a 3-bit mantissa. */
    f8_e4m3(dnnl_f8_e4m3),
    /** [16-bit/half-precision floating point](https://en.wikipedia.org/wiki/Half-precision_floating-point_format). */
    f16(dnnl_f16),
    /** non-standard
     * [16-bit floating point with 7-bit mantissa](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format). */
    bf16(dnnl_bf16),
    /** [32-bit/single-precision floating point](https://en.wikipedia.org/wiki/Single-precision_floating-point_format). */
    f32(dnnl_f32),
    /** [64-bit/double-precision floating point](https://en.wikipedia.org/wiki/Double-precision_floating-point_format). */
    f64(dnnl_f64),
    /** 32-bit signed integer. */
    s32(dnnl_s32),
    /** 8-bit signed integer. */
    s8(dnnl_s8),
    /** 8-bit unsigned integer. */
    u8(dnnl_u8);
    /** Raw value of the corresponding C API dnnl_data_type_t constant. */
    public final int value;
    private data_type(int v) { this.value = v; }
    private data_type(data_type e) { this.value = e.value; }
    /** Maps this instance back to the canonical enum constant carrying the
     * same native value; returns {@code this} when no constant matches. */
    public data_type intern() { for (data_type e : values()) if (e.value == value) return e; return this; }
    @Override public String toString() { return intern().name(); }
}
/** Returns size of data type in bytes.
 * @param adata_type data type whose element size is queried
 * @return The number of bytes occupied by data type. */
public static native @Cast("size_t") long data_type_size(data_type adata_type);
/** Memory format kind. */
public enum format_kind {
    /** Undefined memory format kind, used for empty memory descriptors. */
    undef(dnnl_format_kind_undef),
    /** A special format kind that indicates that the actual format will be
     * selected by a primitive automatically. */
    any(dnnl_format_kind_any),
    /** A tensor in a generic format described by the stride and blocking
     * values in each dimension. */
    blocked(dnnl_blocked),
    // Additional constants are emitted between these guards only when the
    // bindings are generated with DNNL_EXPERIMENTAL_SPARSE defined.
    // #ifdef DNNL_EXPERIMENTAL_SPARSE
    // #endif
    /** A special format kind that indicates that tensor format is opaque. */
    opaque(dnnl_format_kind_opaque);
    /** Raw value of the corresponding C API dnnl_format_kind_t constant. */
    public final int value;
    private format_kind(int v) { this.value = v; }
    private format_kind(format_kind e) { this.value = e.value; }
    /** Maps this instance back to the canonical enum constant carrying the
     * same native value; returns {@code this} when no constant matches. */
    public format_kind intern() { for (format_kind e : values()) if (e.value == value) return e; return this; }
    @Override public String toString() { return intern().name(); }
}
// #ifdef DNNL_EXPERIMENTAL_SPARSE
// #endif
/** Memory format tag specification.
*
* Memory format tags can be further divided into two categories:
*
* - Domain-agnostic names, i.e. names that do not depend on the tensor
* usage in the specific primitive. These names use letters from {@code a}
* to {@code f} to denote logical dimensions and form the order in which the
* dimensions are laid in memory. For example,
* #dnnl::memory::format_tag::ab is used to denote a 2D tensor where the
* second logical dimension (denoted as {@code b}) is the innermost, i.e.
* has stride = 1, and the first logical dimension ({@code a}) is laid out in
* memory with stride equal to the size of the second dimension. On the
* other hand, #dnnl::memory::format_tag::ba is the transposed version
* of the same tensor: the outermost dimension ({@code a}) becomes the
* innermost one.
*
* - Domain-specific names, i.e. names that make sense only in the
* context of a certain domain, such as CNN. These names are
* aliases to the corresponding domain-agnostic tags and used mostly
* for convenience. For example, #dnnl::memory::format_tag::nc
* is used to denote 2D CNN activations tensor memory format, where
* the channels dimension is the innermost one and the batch dimension
* is the outermost one. Moreover, #dnnl::memory::format_tag::nc is
* an alias for #dnnl::memory::format_tag::ab, because for
* CNN primitives the logical dimensions of activations tensors come
* in order: batch, channels, spatial. In other words, batch
* corresponds to the first logical dimension ({@code a}), and channels
* correspond to the second one ({@code b}).
*
* The following domain-specific notation applies to memory format tags:
* - \c 'n' denotes the mini-batch dimension
* - \c 'c' denotes a channels dimension
* - When there are multiple channel dimensions (for example,
* in convolution weights tensor), \c 'i' and \c 'o' denote dimensions
* of input and output channels
* - \c 'g' denotes a groups dimension for convolution weights
* - \c 'd', \c 'h', and \c 'w' denote spatial depth, height, and width
* respectively
*
* See \ref dnnl_format_tag_t for a detailed description. */
public enum format_tag {
/** Undefined memory format tag */
undef(dnnl_format_tag_undef),
/** Placeholder memory format tag. Used to instruct the primitive to
* select a format automatically. */
any(dnnl_format_tag_any),
/** plain 1D tensor */
a(dnnl_a),
/** plain 2D tensor */
ab(dnnl_ab),
/** permuted 2D tensor */
ba(dnnl_ba),
/** plain 3D tensor */
abc(dnnl_abc),
/** permuted 3D tensor */
acb(dnnl_acb),
/** permuted 3D tensor */
bac(dnnl_bac),
/** permuted 3D tensor */
bca(dnnl_bca),
/** permuted 3D tensor */
cba(dnnl_cba),
/** plain 4D tensor */
abcd(dnnl_abcd),
/** permuted 4D tensor */
abdc(dnnl_abdc),
/** permuted 4D tensor */
acbd(dnnl_acbd),
/** permuted 4D tensor */
acdb(dnnl_acdb),
/** permuted 4D tensor */
adbc(dnnl_adbc),
/** permuted 4D tensor */
bacd(dnnl_bacd),
/** permuted 4D tensor */
bcda(dnnl_bcda),
/** permuted 4D tensor */
cdba(dnnl_cdba),
/** permuted 4D tensor */
dcab(dnnl_dcab),
/** plain 5D tensor */
abcde(dnnl_abcde),
/** permuted 5D tensor */
abdec(dnnl_abdec),
/** permuted 5D tensor */
acbde(dnnl_acbde),
/** permuted 5D tensor */
acdeb(dnnl_acdeb),
/** permuted 5D tensor */
bacde(dnnl_bacde),
/** permuted 5D tensor */
bcdea(dnnl_bcdea),
/** permuted 5D tensor */
cdeba(dnnl_cdeba),
/** permuted 5D tensor */
decab(dnnl_decab),
/** permuted 5D tensor */
abced(dnnl_abced),
/** plain 6D tensor */
abcdef(dnnl_abcdef),
/** permuted 6D tensor */
abdfce(dnnl_abdfce),
/** permuted 6D tensor */
acbdef(dnnl_acbdef),
/** permuted 6D tensor */
abdefc(dnnl_abdefc),
/** permuted 6D tensor */
defcab(dnnl_defcab),
/** permuted 6D tensor */
abcdfe(dnnl_abcdfe),
/** plain 7D tensor */
abcdefg(dnnl_abcdefg),
/** permuted 7D tensor */
abcdegf(dnnl_abcdegf),
/** plain 8D tensor */
abcdefgh(dnnl_abcdefgh),
/** permuted 8D tensor */
abcdefhg(dnnl_abcdefhg),
/** plain 9D tensor */
abcdefghi(dnnl_abcdefghi),
/** permuted 9D tensor */
abcdefgih(dnnl_abcdefgih),
/** plain 10D tensor */
abcdefghij(dnnl_abcdefghij),
/** permuted 10D tensor */
abcdefghji(dnnl_abcdefghji),
/** plain 11D tensor */
abcdefghijk(dnnl_abcdefghijk),
/** permuted 11D tensor */
abcdefghikj(dnnl_abcdefghikj),
/** plain 12D tensor */
abcdefghijkl(dnnl_abcdefghijkl),
/** permuted 12D tensor */
abcdefghijlk(dnnl_abcdefghijlk),
/** 1D tensor; an alias for #dnnl::memory::format_tag::a */
x(a.value),
/** 2D CNN activations tensor; an alias for #dnnl::memory::format_tag::ab */
nc(ab.value),
/** 2D CNN activations tensor; an alias for #dnnl::memory::format_tag::ba */
cn(ba.value),
/** 2D RNN statistics tensor; an alias for #dnnl::memory::format_tag::ab */
tn(ab.value),
/** 2D RNN statistics tensor; an alias for #dnnl::memory::format_tag::ba */
nt(ba.value),
/** 3D CNN activations tensor; an alias for #dnnl::memory::format_tag::abc */
ncw(abc.value),
/** 3D CNN activations tensor; an alias for #dnnl::memory::format_tag::acb */
nwc(acb.value),
/** 4D CNN activations tensor; an alias for #dnnl::memory::format_tag::abcd */
nchw(abcd.value),
/** 4D CNN activations tensor; an alias for #dnnl::memory::format_tag::acdb */
nhwc(acdb.value),
/** 4D CNN activations tensor; an alias for #dnnl::memory::format_tag::bcda */
chwn(bcda.value),
/** 5D CNN activations tensor; an alias for #dnnl::memory::format_tag::abcde */
ncdhw(abcde.value),
/** 5D CNN activations tensor; an alias for #dnnl::memory::format_tag::acdeb */
ndhwc(acdeb.value),
/** 2D CNN weights tensor; an alias for #dnnl::memory::format_tag::ab */
oi(ab.value),
/** 2D CNN weights tensor; an alias for #dnnl::memory::format_tag::ba */
io(ba.value),
/** 3D CNN weights tensor; an alias for #dnnl::memory::format_tag::abc */
oiw(abc.value),
/** 3D CNN weights tensor; an alias for #dnnl::memory::format_tag::acb */
owi(acb.value),
/** 3D CNN weights tensor; an alias for #dnnl::memory::format_tag::cba */
wio(cba.value),
/** 3D CNN weights tensor; an alias for #dnnl::memory::format_tag::bca */
iwo(bca.value),
/** 4D CNN weights tensor; an alias for #dnnl::memory::format_tag::abcd */
oihw(abcd.value),
/** 4D CNN weights tensor; an alias for #dnnl::memory::format_tag::cdba */
hwio(cdba.value),
/** 4D CNN weights tensor; an alias for #dnnl::memory::format_tag::acdb */
ohwi(acdb.value),
/** 4D CNN weights tensor; an alias for #dnnl::memory::format_tag::bcda */
ihwo(bcda.value),
/** 4D CNN weights tensor; an alias for #dnnl::memory::format_tag::bacd */
iohw(bacd.value),
/** 5D CNN weights tensor; an alias for #dnnl::memory::format_tag::abcde */
oidhw(abcde.value),
/** 5D CNN weights tensor; an alias for #dnnl::memory::format_tag::cdeba */
dhwio(cdeba.value),
/** 5D CNN weights tensor; an alias for #dnnl::memory::format_tag::acdeb */
odhwi(acdeb.value),
/** 5D CNN weights tensor; an alias for #dnnl::memory::format_tag::bacde */
iodhw(bacde.value),
/** 5D CNN weights tensor; an alias for #dnnl::memory::format_tag::bcdea */
idhwo(bcdea.value),
/** 4D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::abcd */
goiw(abcd.value),
/** 4D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::abdc */
gowi(abdc.value),
/** 4D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::dcab */
wigo(dcab.value),
/** 5D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::abdec */
gohwi(abdec.value),
/** 5D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::abcde */
goihw(abcde.value),
/** 5D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::decab */
hwigo(decab.value),
/** 5D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::acbde */
giohw(acbde.value),
/** 6D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::abcdef */
goidhw(abcdef.value),
        /** 6D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::acbdef */
giodhw(acbdef.value),
/** 6D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::abdefc */
godhwi(abdefc.value),
/** 6D CNN weights tensor with groups; an alias for #dnnl::memory::format_tag::defcab */
dhwigo(defcab.value),
/** 3D RNN data tensor in the format (seq_length, batch, input
* channels); an alias for #dnnl::memory::format_tag::abc. */
tnc(abc.value),
/** 3D RNN data tensor in the format (batch, seq_length, input
* channels); an alias for #dnnl::memory::format_tag::bac. */
ntc(bac.value),
/** 4D RNN states tensor in the format (num_layers, num_directions,
* batch, state channels); an alias for #dnnl::memory::format_tag::abcd. */
///
ldnc(abcd.value),
/** 5D RNN weights tensor in the format (num_layers, num_directions,
* input_channels, num_gates, output_channels);
* an alias for #dnnl::memory::format_tag::abcde.
*
* - For LSTM cells, the gates order is input, forget, candidate
* and output gate.
* - For GRU cells, the gates order is update, reset and output gate. */
///
ldigo(abcde.value),
/** 5D RNN weights tensor in the format (num_layers, num_directions,
* num_gates, output_channels, input_channels);
* an alias for #dnnl::memory::format_tag::abdec.
*
* - For LSTM cells, the gates order is input, forget, candidate
* and output gate.
* - For GRU cells, the gates order is update, reset and output gate. */
ldgoi(abdec.value),
/** 4D LSTM projection tensor in the format (num_layers, num_directions,
* num_channels_in_hidden_state, num_channels_in_recurrent_projection);
* an alias for #dnnl::memory::format_tag::abcd. */
ldio(abcd.value),
/** 4D LSTM projection tensor in the format (num_layers, num_directions,
* num_channels_in_recurrent_projection, num_channels_in_hidden_state);
* an alias for #dnnl::memory::format_tag::abdc. */
///
ldoi(abdc.value),
/** 4D RNN bias tensor in the format (num_layers, num_directions,
* num_gates, output_channels);
* an alias for #dnnl::memory::format_tag::abcd.
*
* - For LSTM cells, the gates order is input, forget, candidate
* and output gate.
* - For GRU cells, the gates order is update, reset and output gate. */
ldgo(abcd.value),
// Opaque blocked formats
AB16b16a(dnnl_AB16b16a),
AB16b32a(dnnl_AB16b32a),
AB16b48a(dnnl_AB16b48a),
AB16b64a(dnnl_AB16b64a),
AB8b16a2b(dnnl_AB8b16a2b),
AB8b32a2b(dnnl_AB8b32a2b),
AB8b64a2b(dnnl_AB8b64a2b),
AB4b16a4b(dnnl_AB4b16a4b),
AB4b32a4b(dnnl_AB4b32a4b),
AB4b64a4b(dnnl_AB4b64a4b),
AB16b16a4b(dnnl_AB16b16a4b),
AB16b32a4b(dnnl_AB16b32a4b),
AB16b48a4b(dnnl_AB16b48a4b),
AB16b64a4b(dnnl_AB16b64a4b),
AB16b16a2b(dnnl_AB16b16a2b),
AB16b32a2b(dnnl_AB16b32a2b),
AB16b48a2b(dnnl_AB16b48a2b),
AB16b64a2b(dnnl_AB16b64a2b),
Ab4a(dnnl_Ab4a),
Ab8a(dnnl_Ab8a),
Abc16a(dnnl_Abc16a),
ABc16a16b(dnnl_ABc16a16b),
ABc4a4b(dnnl_ABc4a4b),
aBc16b(dnnl_aBc16b),
aBc32b(dnnl_aBc32b),
ABc16b16a(dnnl_ABc16b16a),
AcB16b16a(dnnl_AcB16b16a),
ABc16b32a(dnnl_ABc16b32a),
AcB16b32a(dnnl_AcB16b32a),
ABc16b48a(dnnl_ABc16b48a),
AcB16b48a(dnnl_AcB16b48a),
ABc16b64a(dnnl_ABc16b64a),
AcB16b64a(dnnl_AcB16b64a),
Abc4a(dnnl_Abc4a),
aBc4b(dnnl_aBc4b),
ABc4b16a4b(dnnl_ABc4b16a4b),
AcB4b16a4b(dnnl_AcB4b16a4b),
ABc4b32a4b(dnnl_ABc4b32a4b),
AcB4b32a4b(dnnl_AcB4b32a4b),
ABc4b64a4b(dnnl_ABc4b64a4b),
AcB4b64a4b(dnnl_AcB4b64a4b),
ABc2b8a4b(dnnl_ABc2b8a4b),
ABc16a16b2a(dnnl_ABc16a16b2a),
ABc16b16a4b(dnnl_ABc16b16a4b),
ABc16b32a4b(dnnl_ABc16b32a4b),
ABc16b48a4b(dnnl_ABc16b48a4b),
ABc16b64a4b(dnnl_ABc16b64a4b),
ABc16b16a2b(dnnl_ABc16b16a2b),
ABc16b32a2b(dnnl_ABc16b32a2b),
ABc16b48a2b(dnnl_ABc16b48a2b),
ABc16b64a2b(dnnl_ABc16b64a2b),
ABc4b4a(dnnl_ABc4b4a),
ABc8a16b2a(dnnl_ABc8a16b2a),
ABc8a8b(dnnl_ABc8a8b),
ABc8a4b(dnnl_ABc8a4b),
aBc8b(dnnl_aBc8b),
ABc8b16a2b(dnnl_ABc8b16a2b),
AcB8b16a2b(dnnl_AcB8b16a2b),
ABc8b32a2b(dnnl_ABc8b32a2b),
AcB8b32a2b(dnnl_AcB8b32a2b),
ABc8b64a2b(dnnl_ABc8b64a2b),
AcB8b64a2b(dnnl_AcB8b64a2b),
ABc8b8a(dnnl_ABc8b8a),
AcB8b8a(dnnl_AcB8b8a),
Abcd8a(dnnl_Abcd8a),
Abcd16a(dnnl_Abcd16a),
Abcd32a(dnnl_Abcd32a),
ABcd16a16b(dnnl_ABcd16a16b),
aBcd16b(dnnl_aBcd16b),
aBcd32b(dnnl_aBcd32b),
ABcd16b16a(dnnl_ABcd16b16a),
AcdB16b16a(dnnl_AcdB16b16a),
ABcd16b32a(dnnl_ABcd16b32a),
AcdB16b32a(dnnl_AcdB16b32a),
ABcd16b48a(dnnl_ABcd16b48a),
AcdB16b48a(dnnl_AcdB16b48a),
ABcd16b64a(dnnl_ABcd16b64a),
AcdB16b64a(dnnl_AcdB16b64a),
aBCd16b16c(dnnl_aBCd16b16c),
aBCd16c16b(dnnl_aBCd16c16b),
Abcd4a(dnnl_Abcd4a),
aBcd4b(dnnl_aBcd4b),
ABcd4b16a4b(dnnl_ABcd4b16a4b),
AcdB4b16a4b(dnnl_AcdB4b16a4b),
ABcd4b32a4b(dnnl_ABcd4b32a4b),
AcdB4b32a4b(dnnl_AcdB4b32a4b),
ABcd4b64a4b(dnnl_ABcd4b64a4b),
AcdB4b64a4b(dnnl_AcdB4b64a4b),
ABcd2b8a4b(dnnl_ABcd2b8a4b),
ABcd4b4a(dnnl_ABcd4b4a),
ABcd4a4b(dnnl_ABcd4a4b),
aBCd4c16b4c(dnnl_aBCd4c16b4c),
aBCd2c8b4c(dnnl_aBCd2c8b4c),
ABcd16a16b2a(dnnl_ABcd16a16b2a),
ABcd16b16a4b(dnnl_ABcd16b16a4b),
ABcd16b32a4b(dnnl_ABcd16b32a4b),
ABcd16b48a4b(dnnl_ABcd16b48a4b),
ABcd16b64a4b(dnnl_ABcd16b64a4b),
ABcd16b16a2b(dnnl_ABcd16b16a2b),
ABcd16b32a2b(dnnl_ABcd16b32a2b),
ABcd16b48a2b(dnnl_ABcd16b48a2b),
ABcd16b64a2b(dnnl_ABcd16b64a2b),
aBCd16b16c2b(dnnl_aBCd16b16c2b),
aBCd16c16b4c(dnnl_aBCd16c16b4c),
aBCd16c16b2c(dnnl_aBCd16c16b2c),
aBCd4c4b(dnnl_aBCd4c4b),
aBCd4b4c(dnnl_aBCd4b4c),
ABcd8a16b2a(dnnl_ABcd8a16b2a),
ABcd8a8b(dnnl_ABcd8a8b),
ABcd8a4b(dnnl_ABcd8a4b),
ABcd8a2b(dnnl_ABcd8a2b),
/** 4D tensor blocked by 2nd dimension with block size 8 */
aBcd8b(dnnl_aBcd8b),
ABcd8b16a2b(dnnl_ABcd8b16a2b),
AcdB8b16a2b(dnnl_AcdB8b16a2b),
ABcd8b32a2b(dnnl_ABcd8b32a2b),
AcdB8b32a2b(dnnl_AcdB8b32a2b),
ABcd8b64a2b(dnnl_ABcd8b64a2b),
AcdB8b64a2b(dnnl_AcdB8b64a2b),
aBCd8b16c2b(dnnl_aBCd8b16c2b),
/** 4D tensor blocked by 1st and 2nd dimension with block size 8 */
ABcd8b8a(dnnl_ABcd8b8a),
AcdB8b8a(dnnl_AcdB8b8a),
aBCd8b8c(dnnl_aBCd8b8c),
aBCd8b4c(dnnl_aBCd8b4c),
aBCd8c16b2c(dnnl_aBCd8c16b2c),
aBCd8c8b(dnnl_aBCd8c8b),
Abcde16a(dnnl_Abcde16a),
Abcde32a(dnnl_Abcde32a),
ABcde16a16b(dnnl_ABcde16a16b),
aBcde16b(dnnl_aBcde16b),
aBcde32b(dnnl_aBcde32b),
ABcde16b16a(dnnl_ABcde16b16a),
AcdeB16b16a(dnnl_AcdeB16b16a),
ABcde16b32a(dnnl_ABcde16b32a),
AcdeB16b32a(dnnl_AcdeB16b32a),
ABcde16b48a(dnnl_ABcde16b48a),
AcdeB16b48a(dnnl_AcdeB16b48a),
ABcde16b64a(dnnl_ABcde16b64a),
AcdeB16b64a(dnnl_AcdeB16b64a),
aBCde16b16c(dnnl_aBCde16b16c),
aBCde16c16b(dnnl_aBCde16c16b),
aBCde2c8b4c(dnnl_aBCde2c8b4c),
Abcde4a(dnnl_Abcde4a),
aBcde4b(dnnl_aBcde4b),
ABcde4b4a(dnnl_ABcde4b4a),
ABcde4a4b(dnnl_ABcde4a4b),
aBCde4b4c(dnnl_aBCde4b4c),
aBCde4c16b4c(dnnl_aBCde4c16b4c),
aBCde16b16c2b(dnnl_aBCde16b16c2b),
aBCde16c16b4c(dnnl_aBCde16c16b4c),
aBCde16c16b2c(dnnl_aBCde16c16b2c),
aBCdef16c16b2c(dnnl_aBCdef16c16b2c),
aBCde4c4b(dnnl_aBCde4c4b),
Abcde8a(dnnl_Abcde8a),
ABcde8a8b(dnnl_ABcde8a8b),
ABcde8a4b(dnnl_ABcde8a4b),
aBcde8b(dnnl_aBcde8b),
ABcde8b16a2b(dnnl_ABcde8b16a2b),
AcdeB8b16a2b(dnnl_AcdeB8b16a2b),
ABcde8b32a2b(dnnl_ABcde8b32a2b),
AcdeB8b32a2b(dnnl_AcdeB8b32a2b),
ABcde8b64a2b(dnnl_ABcde8b64a2b),
AcdeB8b64a2b(dnnl_AcdeB8b64a2b),
ABcde4b16a4b(dnnl_ABcde4b16a4b),
AcdeB4b16a4b(dnnl_AcdeB4b16a4b),
ABcde4b32a4b(dnnl_ABcde4b32a4b),
AcdeB4b32a4b(dnnl_AcdeB4b32a4b),
ABcde4b64a4b(dnnl_ABcde4b64a4b),
AcdeB4b64a4b(dnnl_AcdeB4b64a4b),
ABcde16b16a4b(dnnl_ABcde16b16a4b),
ABcde16b32a4b(dnnl_ABcde16b32a4b),
ABcde16b48a4b(dnnl_ABcde16b48a4b),
ABcde16b64a4b(dnnl_ABcde16b64a4b),
ABcde16b16a2b(dnnl_ABcde16b16a2b),
ABcde16b32a2b(dnnl_ABcde16b32a2b),
ABcde16b48a2b(dnnl_ABcde16b48a2b),
ABcde16b64a2b(dnnl_ABcde16b64a2b),
ABcde2b8a4b(dnnl_ABcde2b8a4b),
aBCde8b16c2b(dnnl_aBCde8b16c2b),
ABcde8b8a(dnnl_ABcde8b8a),
AcdeB8b8a(dnnl_AcdeB8b8a),
aBCde8b8c(dnnl_aBCde8b8c),
aBCde8b4c(dnnl_aBCde8b4c),
ABcd4a8b8a4b(dnnl_ABcd4a8b8a4b),
ABcd2a8b8a2b(dnnl_ABcd2a8b8a2b),
aBCde4b8c8b4c(dnnl_aBCde4b8c8b4c),
aBCde2b8c8b2c(dnnl_aBCde2b8c8b2c),
aBCde8c16b2c(dnnl_aBCde8c16b2c),
aBCde8c8b(dnnl_aBCde8c8b),
aBcdef16b(dnnl_aBcdef16b),
aBCdef16b16c(dnnl_aBCdef16b16c),
aBCdef16c16b(dnnl_aBCdef16c16b),
aBcdef4b(dnnl_aBcdef4b),
aBCdef2c8b4c(dnnl_aBCdef2c8b4c),
aBCdef4c4b(dnnl_aBCdef4c4b),
aBCdef4b4c(dnnl_aBCdef4b4c),
aBCdef8b8c(dnnl_aBCdef8b8c),
aBCdef8b4c(dnnl_aBCdef8b4c),
aBCdef8c16b2c(dnnl_aBCdef8c16b2c),
aBCdef4c16b4c(dnnl_aBCdef4c16b4c),
aBCdef8c8b(dnnl_aBCdef8c8b),
aBdc16b(dnnl_aBdc16b),
aBdc4b(dnnl_aBdc4b),
aBdc8b(dnnl_aBdc8b),
aBdC8b2c(dnnl_aBdC8b2c),
aBdC8b4c(dnnl_aBdC8b4c),
aBdec16b(dnnl_aBdec16b),
aBdec4b(dnnl_aBdec4b),
aBdec8b(dnnl_aBdec8b),
aBdeC8b2c(dnnl_aBdeC8b2c),
aBdeC8b4c(dnnl_aBdeC8b4c),
aBdefc16b(dnnl_aBdefc16b),
aCBdef16c16b(dnnl_aCBdef16c16b),
aCBdef16b16c(dnnl_aCBdef16b16c),
aBdefc4b(dnnl_aBdefc4b),
aBdefc8b(dnnl_aBdefc8b),
aBdefC8b2c(dnnl_aBdefC8b2c),
aBdefC8b4c(dnnl_aBdefC8b4c),
Acb16a(dnnl_Acb16a),
Acb4a(dnnl_Acb4a),
Acb8a(dnnl_Acb8a),
AcB8a2b(dnnl_AcB8a2b),
AcB8a4b(dnnl_AcB8a4b),
aCBd16b16c(dnnl_aCBd16b16c),
aCBd16c16b(dnnl_aCBd16c16b),
aCBde16b16c(dnnl_aCBde16b16c),
aCBde16c16b(dnnl_aCBde16c16b),
Acdb16a(dnnl_Acdb16a),
Acdb4a(dnnl_Acdb4a),
Acdb8a(dnnl_Acdb8a),
AcdB8a2b(dnnl_AcdB8a2b),
AcdB8a4b(dnnl_AcdB8a4b),
Acdeb16a(dnnl_Acdeb16a),
Acdeb4a(dnnl_Acdeb4a),
Acdeb8a(dnnl_Acdeb8a),
AcdeB8a2b(dnnl_AcdeB8a2b),
AcdeB8a4b(dnnl_AcdeB8a4b),
BAc16a16b(dnnl_BAc16a16b),
BAc16b16a(dnnl_BAc16b16a),
BAcd16a16b(dnnl_BAcd16a16b),
BAcd16b16a(dnnl_BAcd16b16a),
ABcd32a32b(dnnl_ABcd32a32b),
BAcde16b16a(dnnl_BAcde16b16a),
BAcde16a16b(dnnl_BAcde16a16b),
aBdec32b(dnnl_aBdec32b),
Abcdef16a(dnnl_Abcdef16a),
Abcdef32a(dnnl_Abcdef32a),
Acdb32a(dnnl_Acdb32a),
aBCd2b4c2b(dnnl_aBCd2b4c2b),
aBCde2b4c2b(dnnl_aBCde2b4c2b),
aBCdef2b4c2b(dnnl_aBCdef2b4c2b),
aBCd2c4b2c(dnnl_aBCd2c4b2c),
aBCde2c4b2c(dnnl_aBCde2c4b2c),
aBCdef2c4b2c(dnnl_aBCdef2c4b2c),
aBCd4b8c2b(dnnl_aBCd4b8c2b),
aBCde4b8c2b(dnnl_aBCde4b8c2b),
aBCdef4b8c2b(dnnl_aBCdef4b8c2b),
aBCd4c8b2c(dnnl_aBCd4c8b2c),
aBCde4c8b2c(dnnl_aBCde4c8b2c),
aBCdef4c8b2c(dnnl_aBCdef4c8b2c),
AB32a32b8a4b(dnnl_AB32a32b8a4b),
AB32a32b8a2b(dnnl_AB32a32b8a2b),
AB8a4b(dnnl_AB8a4b),
AB8a2b(dnnl_AB8a2b),
abDc16d(dnnl_abDc16d),
abDc32d(dnnl_abDc32d),
abDC32d4c(dnnl_abDC32d4c),
abCd32c(dnnl_abCd32c),
abdEc16e(dnnl_abdEc16e),
abdEc32e(dnnl_abdEc32e),
abdEC32e2c(dnnl_abdEC32e2c),
abdEC32e4c(dnnl_abdEC32e4c),
abdCe16c(dnnl_abdCe16c),
abdCe32c(dnnl_abdCe32c),
abdCE32c2e(dnnl_abdCE32c2e),
aBCdef16c16b4c(dnnl_aBCdef16c16b4c),
aBdC16b4c(dnnl_aBdC16b4c),
aBdeC16b4c(dnnl_aBdeC16b4c),
AcB16a4b(dnnl_AcB16a4b),
AcdB16a2b(dnnl_AcdB16a2b),
aBdefC16b4c(dnnl_aBdefC16b4c),
AcdeB16a4b(dnnl_AcdeB16a4b),
Acb32a(dnnl_Acb32a),
AcB32a2b(dnnl_AcB32a2b),
AcB32a4b(dnnl_AcB32a4b),
Acb48a(dnnl_Acb48a),
AcB48a2b(dnnl_AcB48a2b),
AcB48a4b(dnnl_AcB48a4b),
Acb64a(dnnl_Acb64a),
AcB64a2b(dnnl_AcB64a2b),
AcB64a4b(dnnl_AcB64a4b),
cBa2b(dnnl_cBa2b),
cBa4b(dnnl_cBa4b),
aBdc32b(dnnl_aBdc32b),
aBdC32b2c(dnnl_aBdC32b2c),
aBdC32b4c(dnnl_aBdC32b4c),
aBdc48b(dnnl_aBdc48b),
aBdC48b2c(dnnl_aBdC48b2c),
aBdC48b4c(dnnl_aBdC48b4c),
aBdc64b(dnnl_aBdc64b),
aBdC64b2c(dnnl_aBdC64b2c),
aBdC64b4c(dnnl_aBdC64b4c),
adcb(dnnl_adcb),
adCb2c(dnnl_adCb2c),
adCb4c(dnnl_adCb4c),
AcdB32a2b(dnnl_AcdB32a2b),
AcdB32a4b(dnnl_AcdB32a4b),
Acdb48a(dnnl_Acdb48a),
AcdB48a2b(dnnl_AcdB48a2b),
AcdB48a4b(dnnl_AcdB48a4b),
Acdb64a(dnnl_Acdb64a),
AcdB64a2b(dnnl_AcdB64a2b),
AcdB64a4b(dnnl_AcdB64a4b),
cdBa2b(dnnl_cdBa2b),
cdBa4b(dnnl_cdBa4b),
aBdeC32b2c(dnnl_aBdeC32b2c),
aBdeC32b4c(dnnl_aBdeC32b4c),
aBdec48b(dnnl_aBdec48b),
aBdeC48b2c(dnnl_aBdeC48b2c),
aBdeC48b4c(dnnl_aBdeC48b4c),
aBdec64b(dnnl_aBdec64b),
aBdeC64b2c(dnnl_aBdeC64b2c),
aBdeC64b4c(dnnl_aBdeC64b4c),
adecb(dnnl_adecb),
adeCb2c(dnnl_adeCb2c),
adeCb4c(dnnl_adeCb4c),
Acdeb32a(dnnl_Acdeb32a),
AcdeB32a2b(dnnl_AcdeB32a2b),
AcdeB32a4b(dnnl_AcdeB32a4b),
Acdeb48a(dnnl_Acdeb48a),
AcdeB48a2b(dnnl_AcdeB48a2b),
AcdeB48a4b(dnnl_AcdeB48a4b),
Acdeb64a(dnnl_Acdeb64a),
AcdeB64a2b(dnnl_AcdeB64a2b),
AcdeB64a4b(dnnl_AcdeB64a4b),
cdeBa2b(dnnl_cdeBa2b),
cdeBa4b(dnnl_cdeBa4b),
aBdefc32b(dnnl_aBdefc32b),
aBdefC32b2c(dnnl_aBdefC32b2c),
aBdefC32b4c(dnnl_aBdefC32b4c),
aBdefc48b(dnnl_aBdefc48b),
aBdefC48b2c(dnnl_aBdefC48b2c),
aBdefC48b4c(dnnl_aBdefC48b4c),
aBdefc64b(dnnl_aBdefc64b),
aBdefC64b2c(dnnl_aBdefC64b2c),
aBdefC64b4c(dnnl_aBdefC64b4c),
adefcb(dnnl_adefcb),
adefCb2c(dnnl_adefCb2c),
adefCb4c(dnnl_adefCb4c),
ABc32a32b(dnnl_ABc32a32b),
BAc8a16b2a(dnnl_BAc8a16b2a),
BAcd8a16b2a(dnnl_BAcd8a16b2a),
ABcde8a16b2a(dnnl_ABcde8a16b2a),
aCBd8b16c2b(dnnl_aCBd8b16c2b),
BAcde8a16b2a(dnnl_BAcde8a16b2a),
aCBde8b16c2b(dnnl_aCBde8b16c2b),
ABcde32a32b(dnnl_ABcde32a32b),
ABc4a8b8a4b(dnnl_ABc4a8b8a4b),
ABcde4a8b8a4b(dnnl_ABcde4a8b8a4b),
BAc4b8a8b4a(dnnl_BAc4b8a8b4a),
BAcd4b8a8b4a(dnnl_BAcd4b8a8b4a),
BAcde4b8a8b4a(dnnl_BAcde4b8a8b4a),
aBCd4b8c8b4c(dnnl_aBCd4b8c8b4c),
aBCdef4b8c8b4c(dnnl_aBCdef4b8c8b4c),
aBCdef8b16c2b(dnnl_aBCdef8b16c2b),
aCBdef8b16c2b(dnnl_aCBdef8b16c2b),
aBdC16b2c(dnnl_aBdC16b2c),
aBdeC16b2c(dnnl_aBdeC16b2c),
aBdefC16b2c(dnnl_aBdefC16b2c),
aBedc16b(dnnl_aBedc16b),
AcB16a2b(dnnl_AcB16a2b),
AcdB16a4b(dnnl_AcdB16a4b),
AcdeB16a2b(dnnl_AcdeB16a2b),
Adcb16a(dnnl_Adcb16a),
aCBd4c8b8c4b(dnnl_aCBd4c8b8c4b),
aCBde4c8b8c4b(dnnl_aCBde4c8b8c4b),
aCBdef4c8b8c4b(dnnl_aCBdef4c8b8c4b),
ABc32a16b(dnnl_ABc32a16b),
ABcd16a32b(dnnl_ABcd16a32b),
ABcd32a16b(dnnl_ABcd32a16b),
ABcde32a16b(dnnl_ABcde32a16b),
AB48a16b(dnnl_AB48a16b),
AB48a32b(dnnl_AB48a32b),
ABc40a16b(dnnl_ABc40a16b),
ABc40a32b(dnnl_ABc40a32b),
aBC48b16c(dnnl_aBC48b16c),
aBC48b32c(dnnl_aBC48b32c),
ABcd40a16b(dnnl_ABcd40a16b),
ABcd40a32b(dnnl_ABcd40a32b),
BA16a16b(dnnl_BA16a16b),
BA16a32b(dnnl_BA16a32b),
BA16a48b(dnnl_BA16a48b),
BA16a64b(dnnl_BA16a64b),
BA16a16b2a(dnnl_BA16a16b2a),
BA16a32b2a(dnnl_BA16a32b2a),
BA16a48b2a(dnnl_BA16a48b2a),
BA16a64b2a(dnnl_BA16a64b2a),
BA16a16b4a(dnnl_BA16a16b4a),
BA16a32b4a(dnnl_BA16a32b4a),
BA16a48b4a(dnnl_BA16a48b4a),
BA16a64b4a(dnnl_BA16a64b4a),
decbA16a(dnnl_decbA16a),
decbA8a(dnnl_decbA8a),
defcbA16a(dnnl_defcbA16a),
defcbA8a(dnnl_defcbA8a),
aCB16b16c(dnnl_aCB16b16c),
aCB16b32c(dnnl_aCB16b32c),
aCB16b48c(dnnl_aCB16b48c),
aCB16b64c(dnnl_aCB16b64c),
aCB16b16c2b(dnnl_aCB16b16c2b),
aCB16b32c2b(dnnl_aCB16b32c2b),
aCB16b48c2b(dnnl_aCB16b48c2b),
aCB16b64c2b(dnnl_aCB16b64c2b),
aCB16b16c4b(dnnl_aCB16b16c4b),
aCB16b32c4b(dnnl_aCB16b32c4b),
aCB16b48c4b(dnnl_aCB16b48c4b),
aCB16b64c4b(dnnl_aCB16b64c4b),
Acb24a(dnnl_Acb24a),
Acdb24a(dnnl_Acdb24a),
Acdeb24a(dnnl_Acdeb24a),
aBdc24b(dnnl_aBdc24b),
aBdec24b(dnnl_aBdec24b),
aBdefc24b(dnnl_aBdefc24b),
AcB24a2b(dnnl_AcB24a2b),
AcdB24a2b(dnnl_AcdB24a2b),
AcdeB24a2b(dnnl_AcdeB24a2b),
aBdC24b2c(dnnl_aBdC24b2c),
aBdeC24b2c(dnnl_aBdeC24b2c),
aBdefC24b2c(dnnl_aBdefC24b2c),
AcB24a4b(dnnl_AcB24a4b),
AcdB24a4b(dnnl_AcdB24a4b),
AcdeB24a4b(dnnl_AcdeB24a4b),
aBdC24b4c(dnnl_aBdC24b4c),
aBdeC24b4c(dnnl_aBdeC24b4c),
aBdefC24b4c(dnnl_aBdefC24b4c),
AB8b32a(dnnl_AB8b32a),
ABc8b32a(dnnl_ABc8b32a),
AcB8b32a(dnnl_AcB8b32a),
ABcd8b32a(dnnl_ABcd8b32a),
AcdB8b32a(dnnl_AcdB8b32a),
ABcde8b32a(dnnl_ABcde8b32a),
AcdeB8b32a(dnnl_AcdeB8b32a),
AB8b24a(dnnl_AB8b24a),
ABc8b24a(dnnl_ABc8b24a),
AcB8b24a(dnnl_AcB8b24a),
ABcd8b24a(dnnl_ABcd8b24a),
AcdB8b24a(dnnl_AcdB8b24a),
ABcde8b24a(dnnl_ABcde8b24a),
AcdeB8b24a(dnnl_AcdeB8b24a),
AB8b16a(dnnl_AB8b16a),
ABc8b16a(dnnl_ABc8b16a),
AcB8b16a(dnnl_AcB8b16a),
ABcd8b16a(dnnl_ABcd8b16a),
AcdB8b16a(dnnl_AcdB8b16a),
ABcde8b16a(dnnl_ABcde8b16a),
AcdeB8b16a(dnnl_AcdeB8b16a),
AB8b8a(dnnl_AB8b8a),
format_tag_last(dnnl_format_tag_last),
nCdhw16c(dnnl_nCdhw16c),
nCdhw4c(dnnl_nCdhw4c),
nCdhw8c(dnnl_nCdhw8c),
nChw16c(dnnl_nChw16c),
nChw4c(dnnl_nChw4c),
nChw8c(dnnl_nChw8c),
nCw16c(dnnl_nCw16c),
nCw4c(dnnl_nCw4c),
nCw8c(dnnl_nCw8c),
NCw16n16c(dnnl_NCw16n16c),
NChw16n16c(dnnl_NChw16n16c),
NCdhw16n16c(dnnl_NCdhw16n16c),
NCdhw32n32c(dnnl_NCdhw32n32c),
NChw32n32c(dnnl_NChw32n32c),
IOhw16i16o(dnnl_IOhw16i16o),
OI16i16o(dnnl_OI16i16o),
OI16i32o(dnnl_OI16i32o),
OI16i48o(dnnl_OI16i48o),
OI16i64o(dnnl_OI16i64o),
OI8i16o2i(dnnl_OI8i16o2i),
OI8i32o2i(dnnl_OI8i32o2i),
OI8i64o2i(dnnl_OI8i64o2i),
OI4i8o4i(dnnl_OI4i8o4i),
OI4i16o4i(dnnl_OI4i16o4i),
OI4i24o4i(dnnl_OI4i24o4i),
OI4i32o4i(dnnl_OI4i32o4i),
OI4i64o4i(dnnl_OI4i64o4i),
Ohwi32o(dnnl_Ohwi32o),
IOdhw16i16o(dnnl_IOdhw16i16o),
gIOhw16i16o(dnnl_gIOhw16i16o),
gOhwi32o(dnnl_gOhwi32o),
Goidhw16g(dnnl_Goidhw16g),
IOw16o16i(dnnl_IOw16o16i),
OIw16i16o(dnnl_OIw16i16o),
OwI16i16o(dnnl_OwI16i16o),
OIw16i32o(dnnl_OIw16i32o),
OwI16i32o(dnnl_OwI16i32o),
OIw16i48o(dnnl_OIw16i48o),
OwI16i48o(dnnl_OwI16i48o),
OIw16i64o(dnnl_OIw16i64o),
OwI16i64o(dnnl_OwI16i64o),
IOw16i16o(dnnl_IOw16i16o),
gIOw16i16o(dnnl_gIOw16i16o),
OIw16o16i(dnnl_OIw16o16i),
Oiw16o(dnnl_Oiw16o),
OIw4i8o4i(dnnl_OIw4i8o4i),
OwI4i8o4i(dnnl_OwI4i8o4i),
OIw4i16o4i(dnnl_OIw4i16o4i),
OwI4i16o4i(dnnl_OwI4i16o4i),
OIw4i24o4i(dnnl_OIw4i24o4i),
OwI4i24o4i(dnnl_OwI4i24o4i),
OIw4i32o4i(dnnl_OIw4i32o4i),
OwI4i32o4i(dnnl_OwI4i32o4i),
OIw4i64o4i(dnnl_OIw4i64o4i),
OwI4i64o4i(dnnl_OwI4i64o4i),
OIw2i8o4i(dnnl_OIw2i8o4i),
OIw4i4o(dnnl_OIw4i4o),
OIw4o4i(dnnl_OIw4o4i),
Oiw4o(dnnl_Oiw4o),
OIw8i16o2i(dnnl_OIw8i16o2i),
OwI8i16o2i(dnnl_OwI8i16o2i),
OIw8i32o2i(dnnl_OIw8i32o2i),
OwI8i32o2i(dnnl_OwI8i32o2i),
OIw8i64o2i(dnnl_OIw8i64o2i),
OwI8i64o2i(dnnl_OwI8i64o2i),
OIw8i8o(dnnl_OIw8i8o),
OwI8i8o(dnnl_OwI8i8o),
OIw8o16i2o(dnnl_OIw8o16i2o),
OIw8o8i(dnnl_OIw8o8i),
OIw8o4i(dnnl_OIw8o4i),
OIw16i16o4i(dnnl_OIw16i16o4i),
OIw16i32o4i(dnnl_OIw16i32o4i),
OIw16i48o4i(dnnl_OIw16i48o4i),
OIw16i64o4i(dnnl_OIw16i64o4i),
OIw16i16o2i(dnnl_OIw16i16o2i),
OIw16i32o2i(dnnl_OIw16i32o2i),
OIw16i48o2i(dnnl_OIw16i48o2i),
OIw16i64o2i(dnnl_OIw16i64o2i),
OIw16o16i2o(dnnl_OIw16o16i2o),
Owi16o(dnnl_Owi16o),
OwI16o2i(dnnl_OwI16o2i),
Iwo16i(dnnl_Iwo16i),
IwO16i2o(dnnl_IwO16i2o),
IwO16i4o(dnnl_IwO16i4o),
Owi4o(dnnl_Owi4o),
Owi8o(dnnl_Owi8o),
OwI8o2i(dnnl_OwI8o2i),
OwI8o4i(dnnl_OwI8o4i),
IOhw16o16i(dnnl_IOhw16o16i),
Ohwi16o(dnnl_Ohwi16o),
OhwI16o2i(dnnl_OhwI16o2i),
Ihwo16i(dnnl_Ihwo16i),
IhwO16i2o(dnnl_IhwO16i2o),
IhwO16i4o(dnnl_IhwO16i4o),
Ohwi4o(dnnl_Ohwi4o),
Ohwi8o(dnnl_Ohwi8o),
OhwI8o2i(dnnl_OhwI8o2i),
OhwI8o4i(dnnl_OhwI8o4i),
OIhw16i16o(dnnl_OIhw16i16o),
OhwI16i16o(dnnl_OhwI16i16o),
OIhw16i32o(dnnl_OIhw16i32o),
OhwI16i32o(dnnl_OhwI16i32o),
OIhw16i48o(dnnl_OIhw16i48o),
OhwI16i48o(dnnl_OhwI16i48o),
OIhw16i64o(dnnl_OIhw16i64o),
OhwI16i64o(dnnl_OhwI16i64o),
OIhw16o16i(dnnl_OIhw16o16i),
Oihw16o(dnnl_Oihw16o),
OIhw4i8o4i(dnnl_OIhw4i8o4i),
OhwI4i8o4i(dnnl_OhwI4i8o4i),
OIhw4i16o4i(dnnl_OIhw4i16o4i),
OhwI4i16o4i(dnnl_OhwI4i16o4i),
OIhw4i24o4i(dnnl_OIhw4i24o4i),
OhwI4i24o4i(dnnl_OhwI4i24o4i),
OIhw4i32o4i(dnnl_OIhw4i32o4i),
OhwI4i32o4i(dnnl_OhwI4i32o4i),
OIhw4i64o4i(dnnl_OIhw4i64o4i),
OhwI4i64o4i(dnnl_OhwI4i64o4i),
OIhw4i4o(dnnl_OIhw4i4o),
OIhw4o4i(dnnl_OIhw4o4i),
Oihw4o(dnnl_Oihw4o),
OIhw8i16o2i(dnnl_OIhw8i16o2i),
OhwI8i16o2i(dnnl_OhwI8i16o2i),
OIhw8i32o2i(dnnl_OIhw8i32o2i),
OhwI8i32o2i(dnnl_OhwI8i32o2i),
OIhw8i64o2i(dnnl_OIhw8i64o2i),
OhwI8i64o2i(dnnl_OhwI8i64o2i),
OIhw8i8o(dnnl_OIhw8i8o),
OhwI8i8o(dnnl_OhwI8i8o),
OIhw8o16i2o(dnnl_OIhw8o16i2o),
OIhw8o8i(dnnl_OIhw8o8i),
OIhw8o4i(dnnl_OIhw8o4i),
OIhw2i8o4i(dnnl_OIhw2i8o4i),
IOdhw16o16i(dnnl_IOdhw16o16i),
Odhwi16o(dnnl_Odhwi16o),
OdhwI16o2i(dnnl_OdhwI16o2i),
Idhwo16i(dnnl_Idhwo16i),
IdhwO16i2o(dnnl_IdhwO16i2o),
IdhwO16i4o(dnnl_IdhwO16i4o),
Odhwi4o(dnnl_Odhwi4o),
Odhwi8o(dnnl_Odhwi8o),
OdhwI8o2i(dnnl_OdhwI8o2i),
OdhwI8o4i(dnnl_OdhwI8o4i),
OIdhw16i16o(dnnl_OIdhw16i16o),
OdhwI16i16o(dnnl_OdhwI16i16o),
OIdhw16i32o(dnnl_OIdhw16i32o),
OdhwI16i32o(dnnl_OdhwI16i32o),
OIdhw16i48o(dnnl_OIdhw16i48o),
OdhwI16i48o(dnnl_OdhwI16i48o),
OIdhw16i64o(dnnl_OIdhw16i64o),
OdhwI16i64o(dnnl_OdhwI16i64o),
OIdhw16o16i(dnnl_OIdhw16o16i),
OIdhw16o16i2o(dnnl_OIdhw16o16i2o),
Oidhw16o(dnnl_Oidhw16o),
OIdhw4i4o(dnnl_OIdhw4i4o),
OIdhw4o4i(dnnl_OIdhw4o4i),
Oidhw4o(dnnl_Oidhw4o),
OIdhw8i16o2i(dnnl_OIdhw8i16o2i),
OdhwI8i16o2i(dnnl_OdhwI8i16o2i),
OIdhw8i32o2i(dnnl_OIdhw8i32o2i),
OdhwI8i32o2i(dnnl_OdhwI8i32o2i),
OIdhw8i64o2i(dnnl_OIdhw8i64o2i),
OdhwI8i64o2i(dnnl_OdhwI8i64o2i),
OIdhw4i8o4i(dnnl_OIdhw4i8o4i),
OdhwI4i8o4i(dnnl_OdhwI4i8o4i),
OIdhw4i16o4i(dnnl_OIdhw4i16o4i),
OdhwI4i16o4i(dnnl_OdhwI4i16o4i),
OIdhw16i16o4i(dnnl_OIdhw16i16o4i),
OIdhw16i32o4i(dnnl_OIdhw16i32o4i),
OIdhw16i48o4i(dnnl_OIdhw16i48o4i),
OIdhw16i64o4i(dnnl_OIdhw16i64o4i),
OIdhw16i16o2i(dnnl_OIdhw16i16o2i),
OIdhw16i32o2i(dnnl_OIdhw16i32o2i),
OIdhw16i48o2i(dnnl_OIdhw16i48o2i),
OIdhw16i64o2i(dnnl_OIdhw16i64o2i),
OIdhw4i24o4i(dnnl_OIdhw4i24o4i),
OdhwI4i24o4i(dnnl_OdhwI4i24o4i),
OIdhw4i32o4i(dnnl_OIdhw4i32o4i),
OdhwI4i32o4i(dnnl_OdhwI4i32o4i),
OIdhw4i64o4i(dnnl_OIdhw4i64o4i),
OdhwI4i64o4i(dnnl_OdhwI4i64o4i),
OIdhw2i8o4i(dnnl_OIdhw2i8o4i),
OIdhw8i8o(dnnl_OIdhw8i8o),
OdhwI8i8o(dnnl_OdhwI8i8o),
OIdhw8o8i(dnnl_OIdhw8o8i),
OIdhw8o4i(dnnl_OIdhw8o4i),
gIOw16o16i(dnnl_gIOw16o16i),
gOIw16i16o(dnnl_gOIw16i16o),
gOIw16o16i(dnnl_gOIw16o16i),
gOiw16o(dnnl_gOiw16o),
gOIw4i16o4i(dnnl_gOIw4i16o4i),
gOIw2i8o4i(dnnl_gOIw2i8o4i),
gOIw4i4o(dnnl_gOIw4i4o),
gOIw4o4i(dnnl_gOIw4o4i),
gOiw4o(dnnl_gOiw4o),
gOIw8i16o2i(dnnl_gOIw8i16o2i),
gOIw8i8o(dnnl_gOIw8i8o),
gOIw8o16i2o(dnnl_gOIw8o16i2o),
gOIw8o8i(dnnl_gOIw8o8i),
gOIw8o4i(dnnl_gOIw8o4i),
gOIw16i16o4i(dnnl_gOIw16i16o4i),
gOIw16i16o2i(dnnl_gOIw16i16o2i),
gOIw16o16i2o(dnnl_gOIw16o16i2o),
gOwi16o(dnnl_gOwi16o),
gOwI16o2i(dnnl_gOwI16o2i),
gIwo16i(dnnl_gIwo16i),
gIwO16i2o(dnnl_gIwO16i2o),
gIwO16i4o(dnnl_gIwO16i4o),
gOwi4o(dnnl_gOwi4o),
gOwi8o(dnnl_gOwi8o),
gOwI8o2i(dnnl_gOwI8o2i),
gOwI8o4i(dnnl_gOwI8o4i),
Goiw8g(dnnl_Goiw8g),
Goiw16g(dnnl_Goiw16g),
gIOhw16o16i(dnnl_gIOhw16o16i),
gOhwi16o(dnnl_gOhwi16o),
gOhwI16o2i(dnnl_gOhwI16o2i),
gIhwo16i(dnnl_gIhwo16i),
gIhwO16i2o(dnnl_gIhwO16i2o),
gIhwO16i4o(dnnl_gIhwO16i4o),
gOhwi4o(dnnl_gOhwi4o),
gOhwi8o(dnnl_gOhwi8o),
gOhwI8o2i(dnnl_gOhwI8o2i),
gOhwI8o4i(dnnl_gOhwI8o4i),
Goihw16g(dnnl_Goihw16g),
gOIhw16i16o(dnnl_gOIhw16i16o),
gOIhw16o16i(dnnl_gOIhw16o16i),
gOihw16o(dnnl_gOihw16o),
gOIhw4i16o4i(dnnl_gOIhw4i16o4i),
gOIhw2i8o4i(dnnl_gOIhw2i8o4i),
gOIhw4i4o(dnnl_gOIhw4i4o),
gOIhw4o4i(dnnl_gOIhw4o4i),
gOihw4o(dnnl_gOihw4o),
Goihw8g(dnnl_Goihw8g),
gOIhw8i16o2i(dnnl_gOIhw8i16o2i),
gOIhw8i8o(dnnl_gOIhw8i8o),
gOIhw8o16i2o(dnnl_gOIhw8o16i2o),
OIw4o8i8o4i(dnnl_OIw4o8i8o4i),
OIdhw4o8i8o4i(dnnl_OIdhw4o8i8o4i),
OIhw4o8i8o4i(dnnl_OIhw4o8i8o4i),
OIhw2o8i8o2i(dnnl_OIhw2o8i8o2i),
gOIw4o8i8o4i(dnnl_gOIw4o8i8o4i),
gOIdhw4o8i8o4i(dnnl_gOIdhw4o8i8o4i),
gOIhw4o8i8o4i(dnnl_gOIhw4o8i8o4i),
gOIhw2o8i8o2i(dnnl_gOIhw2o8i8o2i),
OIhw16i16o4i(dnnl_OIhw16i16o4i),
OIhw16i32o4i(dnnl_OIhw16i32o4i),
OIhw16i48o4i(dnnl_OIhw16i48o4i),
OIhw16i64o4i(dnnl_OIhw16i64o4i),
OIhw16i16o2i(dnnl_OIhw16i16o2i),
OIhw16i32o2i(dnnl_OIhw16i32o2i),
OIhw16i48o2i(dnnl_OIhw16i48o2i),
OIhw16i64o2i(dnnl_OIhw16i64o2i),
OIhw16o16i2o(dnnl_OIhw16o16i2o),
gOIhw16i16o4i(dnnl_gOIhw16i16o4i),
gOIhw16i16o2i(dnnl_gOIhw16i16o2i),
gOIhw16o16i2o(dnnl_gOIhw16o16i2o),
gOIhw8o8i(dnnl_gOIhw8o8i),
gOIhw8o4i(dnnl_gOIhw8o4i),
gIOdhw16i16o(dnnl_gIOdhw16i16o),
gIOdhw16o16i(dnnl_gIOdhw16o16i),
gOdhwi16o(dnnl_gOdhwi16o),
gOdhwI16o2i(dnnl_gOdhwI16o2i),
gIdhwo16i(dnnl_gIdhwo16i),
gIdhwO16i2o(dnnl_gIdhwO16i2o),
gIdhwO16i4o(dnnl_gIdhwO16i4o),
gOdhwi4o(dnnl_gOdhwi4o),
gOdhwi8o(dnnl_gOdhwi8o),
gOdhwI8o2i(dnnl_gOdhwI8o2i),
gOdhwI8o4i(dnnl_gOdhwI8o4i),
gOIdhw16i16o(dnnl_gOIdhw16i16o),
gOIdhw16o16i(dnnl_gOIdhw16o16i),
gOIdhw16o16i2o(dnnl_gOIdhw16o16i2o),
gOidhw16o(dnnl_gOidhw16o),
gOIdhw4i4o(dnnl_gOIdhw4i4o),
gOIdhw4o4i(dnnl_gOIdhw4o4i),
gOidhw4o(dnnl_gOidhw4o),
gOIdhw8i16o2i(dnnl_gOIdhw8i16o2i),
gOIdhw4i16o4i(dnnl_gOIdhw4i16o4i),
gOIdhw16i16o4i(dnnl_gOIdhw16i16o4i),
gOIdhw16i16o2i(dnnl_gOIdhw16i16o2i),
gOIdhw2i8o4i(dnnl_gOIdhw2i8o4i),
gOIdhw8i8o(dnnl_gOIdhw8i8o),
gOIdhw8o8i(dnnl_gOIdhw8o8i),
gOIdhw8o4i(dnnl_gOIdhw8o4i),
gOIw2i4o2i(dnnl_gOIw2i4o2i),
gOIhw2i4o2i(dnnl_gOIhw2i4o2i),
gOIdhw2i4o2i(dnnl_gOIdhw2i4o2i),
gOIw2o4i2o(dnnl_gOIw2o4i2o),
gOIhw2o4i2o(dnnl_gOIhw2o4i2o),
gOIdhw2o4i2o(dnnl_gOIdhw2o4i2o),
gOIw4i8o2i(dnnl_gOIw4i8o2i),
gOIhw4i8o2i(dnnl_gOIhw4i8o2i),
gOIdhw4i8o2i(dnnl_gOIdhw4i8o2i),
gOIw4o8i2o(dnnl_gOIw4o8i2o),
gOIhw4o8i2o(dnnl_gOIhw4o8i2o),
gOIdhw4o8i2o(dnnl_gOIdhw4o8i2o),
        // NOTE: unlike the surrounding constants, these are not backed by a distinct
        // dnnl_ native constant; each one aliases an existing tag's numeric value
        // (e.g. ldOi16o == abDc16d). Presumably the "ld(g)Oi/OI" names are the
        // RNN-weights-oriented spellings of the same physical layouts — confirm
        // against the oneDNN dnnl_format_tag_t documentation.
        ldOi16o(abDc16d.value),
        ldOi32o(abDc32d.value),
        ldOI32o4i(abDC32d4c.value),
        ldgOi16o(abdEc16e.value),
        ldgOi32o(abdEc32e.value),
        ldgOI32o2i(abdEC32e2c.value),
        ldgOI32o4i(abdEC32e4c.value),
OwI16o4i(dnnl_OwI16o4i),
OhwI16o4i(dnnl_OhwI16o4i),
gOwI16o4i(dnnl_gOwI16o4i),
gOhwI16o4i(dnnl_gOhwI16o4i),
OdhwI16o4i(dnnl_OdhwI16o4i),
gOdhwI16o4i(dnnl_gOdhwI16o4i),
Owi32o(dnnl_Owi32o),
OwI32o2i(dnnl_OwI32o2i),
OwI32o4i(dnnl_OwI32o4i),
Owi48o(dnnl_Owi48o),
OwI48o2i(dnnl_OwI48o2i),
OwI48o4i(dnnl_OwI48o4i),
Owi64o(dnnl_Owi64o),
OwI64o2i(dnnl_OwI64o2i),
OwI64o4i(dnnl_OwI64o4i),
Iwo32i(dnnl_Iwo32i),
IwO32i2o(dnnl_IwO32i2o),
IwO32i4o(dnnl_IwO32i4o),
Iwo48i(dnnl_Iwo48i),
IwO48i2o(dnnl_IwO48i2o),
IwO48i4o(dnnl_IwO48i4o),
Iwo64i(dnnl_Iwo64i),
IwO64i2o(dnnl_IwO64i2o),
IwO64i4o(dnnl_IwO64i4o),
wIo2i(dnnl_wIo2i),
wIo4i(dnnl_wIo4i),
gOwi32o(dnnl_gOwi32o),
gOwI32o2i(dnnl_gOwI32o2i),
gOwI32o4i(dnnl_gOwI32o4i),
gOwi48o(dnnl_gOwi48o),
gOwI48o2i(dnnl_gOwI48o2i),
gOwI48o4i(dnnl_gOwI48o4i),
gOwi64o(dnnl_gOwi64o),
gOwI64o2i(dnnl_gOwI64o2i),
gOwI64o4i(dnnl_gOwI64o4i),
gIwo32i(dnnl_gIwo32i),
gIwO32i2o(dnnl_gIwO32i2o),
gIwO32i4o(dnnl_gIwO32i4o),
gIwo48i(dnnl_gIwo48i),
gIwO48i2o(dnnl_gIwO48i2o),
gIwO48i4o(dnnl_gIwO48i4o),
gIwo64i(dnnl_gIwo64i),
gIwO64i2o(dnnl_gIwO64i2o),
gIwO64i4o(dnnl_gIwO64i4o),
gwio(dnnl_gwio),
gwIo2i(dnnl_gwIo2i),
gwIo4i(dnnl_gwIo4i),
OhwI32o(dnnl_OhwI32o),
OhwI32o2i(dnnl_OhwI32o2i),
OhwI32o4i(dnnl_OhwI32o4i),
Ohwi48o(dnnl_Ohwi48o),
OhwI48o2i(dnnl_OhwI48o2i),
OhwI48o4i(dnnl_OhwI48o4i),
Ohwi64o(dnnl_Ohwi64o),
OhwI64o2i(dnnl_OhwI64o2i),
OhwI64o4i(dnnl_OhwI64o4i),
Ihwo32i(dnnl_Ihwo32i),
IhwO32i2o(dnnl_IhwO32i2o),
IhwO32i4o(dnnl_IhwO32i4o),
Ihwo48i(dnnl_Ihwo48i),
IhwO48i2o(dnnl_IhwO48i2o),
IhwO48i4o(dnnl_IhwO48i4o),
Ihwo64i(dnnl_Ihwo64i),
IhwO64i2o(dnnl_IhwO64i2o),
IhwO64i4o(dnnl_IhwO64i4o),
hwIo2i(dnnl_hwIo2i),
hwIo4i(dnnl_hwIo4i),
gOhwI32o(dnnl_gOhwI32o),
gOhwI32o2i(dnnl_gOhwI32o2i),
gOhwI32o4i(dnnl_gOhwI32o4i),
gOhwi48o(dnnl_gOhwi48o),
gOhwI48o2i(dnnl_gOhwI48o2i),
gOhwI48o4i(dnnl_gOhwI48o4i),
gOhwi64o(dnnl_gOhwi64o),
gOhwI64o2i(dnnl_gOhwI64o2i),
gOhwI64o4i(dnnl_gOhwI64o4i),
gIhwo32i(dnnl_gIhwo32i),
gIhwO32i2o(dnnl_gIhwO32i2o),
gIhwO32i4o(dnnl_gIhwO32i4o),
gIhwo48i(dnnl_gIhwo48i),
gIhwO48i2o(dnnl_gIhwO48i2o),
gIhwO48i4o(dnnl_gIhwO48i4o),
gIhwo64i(dnnl_gIhwo64i),
gIhwO64i2o(dnnl_gIhwO64i2o),
gIhwO64i4o(dnnl_gIhwO64i4o),
ghwio(dnnl_ghwio),
ghwIo2i(dnnl_ghwIo2i),
ghwIo4i(dnnl_ghwIo4i),
Odhwi32o(dnnl_Odhwi32o),
OdhwI32o2i(dnnl_OdhwI32o2i),
OdhwI32o4i(dnnl_OdhwI32o4i),
Odhwi48o(dnnl_Odhwi48o),
OdhwI48o2i(dnnl_OdhwI48o2i),
OdhwI48o4i(dnnl_OdhwI48o4i),
Odhwi64o(dnnl_Odhwi64o),
OdhwI64o2i(dnnl_OdhwI64o2i),
OdhwI64o4i(dnnl_OdhwI64o4i),
Idhwo32i(dnnl_Idhwo32i),
IdhwO32i2o(dnnl_IdhwO32i2o),
IdhwO32i4o(dnnl_IdhwO32i4o),
Idhwo48i(dnnl_Idhwo48i),
IdhwO48i2o(dnnl_IdhwO48i2o),
IdhwO48i4o(dnnl_IdhwO48i4o),
Idhwo64i(dnnl_Idhwo64i),
IdhwO64i2o(dnnl_IdhwO64i2o),
IdhwO64i4o(dnnl_IdhwO64i4o),
dhwIo2i(dnnl_dhwIo2i),
dhwIo4i(dnnl_dhwIo4i),
gOdhwi32o(dnnl_gOdhwi32o),
gOdhwI32o2i(dnnl_gOdhwI32o2i),
gOdhwI32o4i(dnnl_gOdhwI32o4i),
gOdhwi48o(dnnl_gOdhwi48o),
gOdhwI48o2i(dnnl_gOdhwI48o2i),
gOdhwI48o4i(dnnl_gOdhwI48o4i),
gOdhwi64o(dnnl_gOdhwi64o),
gOdhwI64o2i(dnnl_gOdhwI64o2i),
gOdhwI64o4i(dnnl_gOdhwI64o4i),
gIdhwo32i(dnnl_gIdhwo32i),
gIdhwO32i2o(dnnl_gIdhwO32i2o),
gIdhwO32i4o(dnnl_gIdhwO32i4o),
gIdhwo48i(dnnl_gIdhwo48i),
gIdhwO48i2o(dnnl_gIdhwO48i2o),
gIdhwO48i4o(dnnl_gIdhwO48i4o),
gIdhwo64i(dnnl_gIdhwo64i),
gIdhwO64i2o(dnnl_gIdhwO64i2o),
gIdhwO64i4o(dnnl_gIdhwO64i4o),
gdhwio(dnnl_gdhwio),
gdhwIo2i(dnnl_gdhwIo2i),
gdhwIo4i(dnnl_gdhwIo4i),
ldIo32i(dnnl_ldIo32i),
ldgIo16i(dnnl_ldgIo16i),
ldgIo32i(dnnl_ldgIo32i),
ldgIO32i2o(dnnl_ldgIO32i2o),
nCdhw32c(dnnl_nCdhw32c),
nChw32c(dnnl_nChw32c),
nCw32c(dnnl_nCw32c),
NCw32n16c(dnnl_NCw32n16c),
NChw32n16c(dnnl_NChw32n16c),
NCdhw32n16c(dnnl_NCdhw32n16c),
NCw32n32c(dnnl_NCw32n32c),
OI16i16o4i(dnnl_OI16i16o4i),
IOw8o16i2o(dnnl_IOw8o16i2o),
IOhw8o16i2o(dnnl_IOhw8o16i2o),
Owhi16o(dnnl_Owhi16o),
OIdhw8o16i2o(dnnl_OIdhw8o16i2o),
IOdhw8o16i2o(dnnl_IOdhw8o16i2o),
Goiw4g(dnnl_Goiw4g),
gIOw8o16i2o(dnnl_gIOw8o16i2o),
Goiw32g(dnnl_Goiw32g),
Goihw4g(dnnl_Goihw4g),
gIOhw8o16i2o(dnnl_gIOhw8o16i2o),
Goihw32g(dnnl_Goihw32g),
gOwhi16o(dnnl_gOwhi16o),
IOw4i8o8i4o(dnnl_IOw4i8o8i4o),
IOhw4i8o8i4o(dnnl_IOhw4i8o8i4o),
IOdhw4i8o8i4o(dnnl_IOdhw4i8o8i4o),
gIOw4i8o8i4o(dnnl_gIOw4i8o8i4o),
gIOhw4i8o8i4o(dnnl_gIOhw4i8o8i4o),
gIOdhw4i8o8i4o(dnnl_gIOdhw4i8o8i4o),
gOIdhw8o16i2o(dnnl_gOIdhw8o16i2o),
gIOdhw8o16i2o(dnnl_gIOdhw8o16i2o),
Goidhw32g(dnnl_Goidhw32g),
OI16i32o4i(dnnl_OI16i32o4i),
OI16i48o4i(dnnl_OI16i48o4i),
OI16i64o4i(dnnl_OI16i64o4i),
OI16i16o2i(dnnl_OI16i16o2i),
OI16i32o2i(dnnl_OI16i32o2i),
OI16i48o2i(dnnl_OI16i48o2i),
OI16i64o2i(dnnl_OI16i64o2i),
aBdeC16c16b4c(dnnl_aBdeC16c16b4c),
AcB16b16a2b(dnnl_AcB16b16a2b),
aBdC16c16b2c(dnnl_aBdC16c16b2c),
AcB16b16a4b(dnnl_AcB16b16a4b),
aBdC16c16b4c(dnnl_aBdC16c16b4c),
AcdB16b16a2b(dnnl_AcdB16b16a2b),
aBdefC16c16b4c(dnnl_aBdefC16c16b4c),
AcdeB16b16a4b(dnnl_AcdeB16b16a4b),
AcB16b32a2b(dnnl_AcB16b32a2b),
AcB16b32a4b(dnnl_AcB16b32a4b),
AcB16b48a2b(dnnl_AcB16b48a2b),
AcB16b48a4b(dnnl_AcB16b48a4b),
AcB16b64a2b(dnnl_AcB16b64a2b),
AcB16b64a4b(dnnl_AcB16b64a4b),
aBdC16c32b2c(dnnl_aBdC16c32b2c),
aBdC16c32b4c(dnnl_aBdC16c32b4c),
aBdC16c48b2c(dnnl_aBdC16c48b2c),
aBdC16c48b4c(dnnl_aBdC16c48b4c),
aBdC16c64b2c(dnnl_aBdC16c64b2c),
aBdC16c64b4c(dnnl_aBdC16c64b4c),
AcdB16b32a2b(dnnl_AcdB16b32a2b),
AcdB16b32a4b(dnnl_AcdB16b32a4b),
AcdB16b48a2b(dnnl_AcdB16b48a2b),
AcdB16b48a4b(dnnl_AcdB16b48a4b),
AcdB16b64a2b(dnnl_AcdB16b64a2b),
AcdB16b64a4b(dnnl_AcdB16b64a4b),
aBdeC16c32b2c(dnnl_aBdeC16c32b2c),
aBdeC16c32b4c(dnnl_aBdeC16c32b4c),
aBdeC16c48b2c(dnnl_aBdeC16c48b2c),
aBdeC16c48b4c(dnnl_aBdeC16c48b4c),
aBdeC16c64b2c(dnnl_aBdeC16c64b2c),
aBdeC16c64b4c(dnnl_aBdeC16c64b4c),
AcdeB16b32a2b(dnnl_AcdeB16b32a2b),
AcdeB16b32a4b(dnnl_AcdeB16b32a4b),
AcdeB16b48a2b(dnnl_AcdeB16b48a2b),
AcdeB16b48a4b(dnnl_AcdeB16b48a4b),
AcdeB16b64a2b(dnnl_AcdeB16b64a2b),
AcdeB16b64a4b(dnnl_AcdeB16b64a4b),
aBdefC16c32b2c(dnnl_aBdefC16c32b2c),
aBdefC16c32b4c(dnnl_aBdefC16c32b4c),
aBdefC16c48b2c(dnnl_aBdefC16c48b2c),
aBdefC16c48b4c(dnnl_aBdefC16c48b4c),
aBdefC16c64b2c(dnnl_aBdefC16c64b2c),
aBdefC16c64b4c(dnnl_aBdefC16c64b4c),
OwI16i16o2i(dnnl_OwI16i16o2i),
gOwI16i16o2i(dnnl_gOwI16i16o2i),
OhwI16i16o2i(dnnl_OhwI16i16o2i),
gOhwI16i16o2i(dnnl_gOhwI16i16o2i),
OdhwI16i16o2i(dnnl_OdhwI16i16o2i),
gOdhwI16i16o2i(dnnl_gOdhwI16i16o2i),
OwI16i16o4i(dnnl_OwI16i16o4i),
gOwI16i16o4i(dnnl_gOwI16i16o4i),
OhwI16i16o4i(dnnl_OhwI16i16o4i),
gOhwI16i16o4i(dnnl_gOhwI16i16o4i),
OdhwI16i16o4i(dnnl_OdhwI16i16o4i),
gOdhwI16i16o4i(dnnl_gOdhwI16i16o4i),
OwI16i32o2i(dnnl_OwI16i32o2i),
OwI16i32o4i(dnnl_OwI16i32o4i),
OwI16i48o2i(dnnl_OwI16i48o2i),
OwI16i48o4i(dnnl_OwI16i48o4i),
OwI16i64o2i(dnnl_OwI16i64o2i),
OwI16i64o4i(dnnl_OwI16i64o4i),
gOwI16i32o2i(dnnl_gOwI16i32o2i),
gOwI16i32o4i(dnnl_gOwI16i32o4i),
gOwI16i48o2i(dnnl_gOwI16i48o2i),
gOwI16i48o4i(dnnl_gOwI16i48o4i),
gOwI16i64o2i(dnnl_gOwI16i64o2i),
gOwI16i64o4i(dnnl_gOwI16i64o4i),
OhwI16i32o2i(dnnl_OhwI16i32o2i),
OhwI16i32o4i(dnnl_OhwI16i32o4i),
OhwI16i48o2i(dnnl_OhwI16i48o2i),
OhwI16i48o4i(dnnl_OhwI16i48o4i),
OhwI16i64o2i(dnnl_OhwI16i64o2i),
OhwI16i64o4i(dnnl_OhwI16i64o4i),
gOhwI16i32o2i(dnnl_gOhwI16i32o2i),
gOhwI16i32o4i(dnnl_gOhwI16i32o4i),
gOhwI16i48o2i(dnnl_gOhwI16i48o2i),
gOhwI16i48o4i(dnnl_gOhwI16i48o4i),
gOhwI16i64o2i(dnnl_gOhwI16i64o2i),
gOhwI16i64o4i(dnnl_gOhwI16i64o4i),
OdhwI16i32o2i(dnnl_OdhwI16i32o2i),
OdhwI16i32o4i(dnnl_OdhwI16i32o4i),
OdhwI16i48o2i(dnnl_OdhwI16i48o2i),
OdhwI16i48o4i(dnnl_OdhwI16i48o4i),
OdhwI16i64o2i(dnnl_OdhwI16i64o2i),
OdhwI16i64o4i(dnnl_OdhwI16i64o4i),
IdhwO16o32i2o(dnnl_IdhwO16o32i2o),
IdhwO16o32i4o(dnnl_IdhwO16o32i4o),
IdhwO16o48i2o(dnnl_IdhwO16o48i2o),
IdhwO16o48i4o(dnnl_IdhwO16o48i4o),
IdhwO16o64i2o(dnnl_IdhwO16o64i2o),
IdhwO16o64i4o(dnnl_IdhwO16o64i4o),
gOdhwI16i32o2i(dnnl_gOdhwI16i32o2i),
gOdhwI16i32o4i(dnnl_gOdhwI16i32o4i),
gOdhwI16i48o2i(dnnl_gOdhwI16i48o2i),
gOdhwI16i48o4i(dnnl_gOdhwI16i48o4i),
gOdhwI16i64o2i(dnnl_gOdhwI16i64o2i),
gOdhwI16i64o4i(dnnl_gOdhwI16i64o4i),
gIdhwO16o32i2o(dnnl_gIdhwO16o32i2o),
gIdhwO16o32i4o(dnnl_gIdhwO16o32i4o),
gIdhwO16o48i2o(dnnl_gIdhwO16o48i2o),
gIdhwO16o48i4o(dnnl_gIdhwO16o48i4o),
gIdhwO16o64i2o(dnnl_gIdhwO16o64i2o),
gIdhwO16o64i4o(dnnl_gIdhwO16o64i4o),
IwO16o16i2o(dnnl_IwO16o16i2o),
IwO16o16i4o(dnnl_IwO16o16i4o),
IhwO16o16i2o(dnnl_IhwO16o16i2o),
IhwO16o16i4o(dnnl_IhwO16o16i4o),
IdhwO16o16i2o(dnnl_IdhwO16o16i2o),
IdhwO16o16i4o(dnnl_IdhwO16o16i4o),
gIwO16o16i2o(dnnl_gIwO16o16i2o),
gIwO16o16i4o(dnnl_gIwO16o16i4o),
gIhwO16o16i2o(dnnl_gIhwO16o16i2o),
gIhwO16o16i4o(dnnl_gIhwO16o16i4o),
gIdhwO16o16i2o(dnnl_gIdhwO16o16i2o),
gIdhwO16o16i4o(dnnl_gIdhwO16o16i4o),
IwO16o32i2o(dnnl_IwO16o32i2o),
IwO16o32i4o(dnnl_IwO16o32i4o),
IwO16o48i2o(dnnl_IwO16o48i2o),
IwO16o48i4o(dnnl_IwO16o48i4o),
IwO16o64i2o(dnnl_IwO16o64i2o),
IwO16o64i4o(dnnl_IwO16o64i4o),
gIwO16o32i2o(dnnl_gIwO16o32i2o),
gIwO16o32i4o(dnnl_gIwO16o32i4o),
gIwO16o48i2o(dnnl_gIwO16o48i2o),
gIwO16o48i4o(dnnl_gIwO16o48i4o),
gIwO16o64i2o(dnnl_gIwO16o64i2o),
gIwO16o64i4o(dnnl_gIwO16o64i4o),
IhwO16o32i2o(dnnl_IhwO16o32i2o),
IhwO16o32i4o(dnnl_IhwO16o32i4o),
IhwO16o48i2o(dnnl_IhwO16o48i2o),
IhwO16o48i4o(dnnl_IhwO16o48i4o),
IhwO16o64i2o(dnnl_IhwO16o64i2o),
IhwO16o64i4o(dnnl_IhwO16o64i4o),
gIhwO16o32i2o(dnnl_gIhwO16o32i2o),
gIhwO16o32i4o(dnnl_gIhwO16o32i4o),
gIhwO16o48i2o(dnnl_gIhwO16o48i2o),
gIhwO16o48i4o(dnnl_gIhwO16o48i4o),
gIhwO16o64i2o(dnnl_gIhwO16o64i2o),
gIhwO16o64i4o(dnnl_gIhwO16o64i4o),
aBdeC16c16b2c(dnnl_aBdeC16c16b2c),
aBdefC16c16b2c(dnnl_aBdefC16c16b2c),
AcdB16b16a4b(dnnl_AcdB16b16a4b),
AcdeB16b16a2b(dnnl_AcdeB16b16a2b),
hwioG16g(dnnl_hwioG16g),
hwioG8g(dnnl_hwioG8g),
dhwioG16g(dnnl_dhwioG16g),
dhwioG8g(dnnl_dhwioG8g),
ABc4a2b(dnnl_ABc4a2b),
ABc8a2b(dnnl_ABc8a2b),
ABcd4a2b(dnnl_ABcd4a2b),
ABcde4a2b(dnnl_ABcde4a2b),
ABcde8a2b(dnnl_ABcde8a2b),
ABcd4a8b8a2b(dnnl_ABcd4a8b8a2b),
NCdhw40n32c(dnnl_NCdhw40n32c),
NChw40n32c(dnnl_NChw40n32c),
NCw40n32c(dnnl_NCw40n32c),
OIdhw4o8i8o2i(dnnl_OIdhw4o8i8o2i),
OIhw4o8i8o2i(dnnl_OIhw4o8i8o2i),
OIw4o8i8o2i(dnnl_OIw4o8i8o2i),
gOIdhw4o8i8o2i(dnnl_gOIdhw4o8i8o2i),
gOIhw4o8i8o2i(dnnl_gOIhw4o8i8o2i),
gOIw4o8i8o2i(dnnl_gOIw4o8i8o2i),
IOdhw4i8o8i2o(dnnl_IOdhw4i8o8i2o),
IOhw4i8o8i2o(dnnl_IOhw4i8o8i2o),
IOw4i8o8i2o(dnnl_IOw4i8o8i2o),
gIOdhw4i8o8i2o(dnnl_gIOdhw4i8o8i2o),
gIOhw4i8o8i2o(dnnl_gIOhw4i8o8i2o),
gIOw4i8o8i2o(dnnl_gIOw4i8o8i2o),
aBCd8b2c(dnnl_aBCd8b2c),
ABcde40a16b(dnnl_ABcde40a16b),
ABcde40a32b(dnnl_ABcde40a32b),
aBCde8b2c(dnnl_aBCde8b2c),
ABcde4a8b8a2b(dnnl_ABcde4a8b8a2b),
ABc4a8b8a2b(dnnl_ABc4a8b8a2b),
aBCdef4b8c8b2c(dnnl_aBCdef4b8c8b2c),
aBCde4b8c8b2c(dnnl_aBCde4b8c8b2c),
aBCd4b8c8b2c(dnnl_aBCd4b8c8b2c),
BAcde4b8a8b2a(dnnl_BAcde4b8a8b2a),
BAcd4b8a8b2a(dnnl_BAcd4b8a8b2a),
BAc4b8a8b2a(dnnl_BAc4b8a8b2a),
aCBdef4c8b8c2b(dnnl_aCBdef4c8b8c2b),
aCBde4c8b8c2b(dnnl_aCBde4c8b8c2b),
aCBd4c8b8c2b(dnnl_aCBd4c8b8c2b),
aBCdef8b2c(dnnl_aBCdef8b2c),
AB32a16b(dnnl_AB32a16b),
AB32a32b(dnnl_AB32a32b),
BA4b8a8b2a(dnnl_BA4b8a8b2a),
BA4b8a8b4a(dnnl_BA4b8a8b4a),
aBC32b16c(dnnl_aBC32b16c),
aBC32b32c(dnnl_aBC32b32c),
aCB4c8b8c2b(dnnl_aCB4c8b8c2b),
aCB4c8b8c4b(dnnl_aCB4c8b8c4b),
ABc2b8a16b4a(dnnl_ABc2b8a16b4a),
ABcd2b8a16b4a(dnnl_ABcd2b8a16b4a),
ABcde2b8a16b4a(dnnl_ABcde2b8a16b4a),
ABc2a8b16a4b(dnnl_ABc2a8b16a4b),
ABc2a8b16a2b(dnnl_ABc2a8b16a2b),
ABc2b32a8b(dnnl_ABc2b32a8b),
ABcd2a8b16a4b(dnnl_ABcd2a8b16a4b),
ABcd2a8b16a2b(dnnl_ABcd2a8b16a2b),
aCBd2c8b16c2b(dnnl_aCBd2c8b16c2b),
ABcd2b32a8b(dnnl_ABcd2b32a8b),
aBCd2c8b16c2b(dnnl_aBCd2c8b16c2b),
ABcde2a8b16a4b(dnnl_ABcde2a8b16a4b),
ABcde2a8b16a2b(dnnl_ABcde2a8b16a2b),
aCBde2c8b16c2b(dnnl_aCBde2c8b16c2b),
ABcde2b32a8b(dnnl_ABcde2b32a8b),
aBC2b8c16b2c(dnnl_aBC2b8c16b2c),
aBCd2b8c16b2c(dnnl_aBCd2b8c16b2c),
aBCde2b8c16b2c(dnnl_aBCde2b8c16b2c),
aBCdef2b8c16b2c(dnnl_aBCdef2b8c16b2c),
BAcde2b8a16b4a(dnnl_BAcde2b8a16b4a),
BAcd2b8a16b4a(dnnl_BAcd2b8a16b4a),
BAc2b8a16b4a(dnnl_BAc2b8a16b4a),
BAcde2b8a16b2a(dnnl_BAcde2b8a16b2a),
BAcd2b8a16b2a(dnnl_BAcd2b8a16b2a),
BAc2b8a16b2a(dnnl_BAc2b8a16b2a),
aBCde2c8b16c2b(dnnl_aBCde2c8b16c2b),
aBCdef2c8b16c2b(dnnl_aBCdef2c8b16c2b),
aCBdef2c8b16c2b(dnnl_aCBdef2c8b16c2b),
aBCd2b8c16b4c(dnnl_aBCd2b8c16b4c),
aBCde2b8c16b4c(dnnl_aBCde2b8c16b4c),
NCdhw40n16c(dnnl_NCdhw40n16c),
NCw40n16c(dnnl_NCw40n16c),
NChw40n16c(dnnl_NChw40n16c),
NCw2c32n8c(dnnl_NCw2c32n8c),
NChw2c32n8c(dnnl_NChw2c32n8c),
NCdhw2c32n8c(dnnl_NCdhw2c32n8c),
OIw2i8o16i4o(dnnl_OIw2i8o16i4o),
OIhw2i8o16i4o(dnnl_OIhw2i8o16i4o),
OIdhw2i8o16i4o(dnnl_OIdhw2i8o16i4o),
OIw2o8i16o4i(dnnl_OIw2o8i16o4i),
OIw2o8i16o2i(dnnl_OIw2o8i16o2i),
IOw2i8o16i4o(dnnl_IOw2i8o16i4o),
IOw2i8o16i2o(dnnl_IOw2i8o16i2o),
OIhw2o8i16o4i(dnnl_OIhw2o8i16o4i),
OIhw2o8i16o2i(dnnl_OIhw2o8i16o2i),
IOhw2i8o16i4o(dnnl_IOhw2i8o16i4o),
IOhw2i8o16i2o(dnnl_IOhw2i8o16i2o),
OIdhw2o8i16o4i(dnnl_OIdhw2o8i16o4i),
OIdhw2o8i16o2i(dnnl_OIdhw2o8i16o2i),
IOdhw2i8o16i4o(dnnl_IOdhw2i8o16i4o),
IOdhw2i8o16i2o(dnnl_IOdhw2i8o16i2o),
gOIw2o8i16o2i(dnnl_gOIw2o8i16o2i),
gIOw2i8o16i2o(dnnl_gIOw2i8o16i2o),
gIOhw2i8o16i2o(dnnl_gIOhw2i8o16i2o),
gIOdhw2i8o16i2o(dnnl_gIOdhw2i8o16i2o),
gOIhw2o8i16o2i(dnnl_gOIhw2o8i16o2i),
gOIdhw2o8i16o2i(dnnl_gOIdhw2o8i16o2i),
gOIw2o8i16o4i(dnnl_gOIw2o8i16o4i),
gOIhw2o8i16o4i(dnnl_gOIhw2o8i16o4i),
BA4b8a16b2a(dnnl_BA4b8a16b2a),
BA4b8a16b4a(dnnl_BA4b8a16b4a),
aCB4c8b16c2b(dnnl_aCB4c8b16c2b),
aCB4c8b16c4b(dnnl_aCB4c8b16c4b),
aCB16c2b(dnnl_aCB16c2b),
aCB16c4b(dnnl_aCB16c4b),
BA16b2a(dnnl_BA16b2a),
BA16b4a(dnnl_BA16b4a),
BA4b4a(dnnl_BA4b4a),
BA8b4a(dnnl_BA8b4a),
aBC16b16c(dnnl_aBC16b16c),
aBC16b32c(dnnl_aBC16b32c),
AB16a16b(dnnl_AB16a16b),
AB16a32b(dnnl_AB16a32b),
ABcde16a16b2a(dnnl_ABcde16a16b2a),
aBCdef16b16c2b(dnnl_aBCdef16b16c2b),
Acedb16a(dnnl_Acedb16a),
aBdfec16b(dnnl_aBdfec16b),
Odwhi16o(dnnl_Odwhi16o),
gOdwhi16o(dnnl_gOdwhi16o),
abdEC64e2c(dnnl_abdEC64e2c),
abdEC64e4c(dnnl_abdEC64e4c),
        // Aliases: defined via another tag's .value rather than a dnnl_ native
        // constant (ldgOI64o2i == abdEC64e2c, ldgOI64o4i == abdEC64e4c).
        ldgOI64o2i(abdEC64e2c.value),
        ldgOI64o4i(abdEC64e4c.value),
abCd4c(dnnl_abCd4c),
abCde4c(dnnl_abCde4c),
abCdef4c(dnnl_abCdef4c),
abCde32c(dnnl_abCde32c),
abCdef32c(dnnl_abCdef32c),
aCdefB16b32c2b(dnnl_aCdefB16b32c2b),
aCdefB16b32c4b(dnnl_aCdefB16b32c4b),
aCdefB16b48c2b(dnnl_aCdefB16b48c2b),
aCdefB16b48c4b(dnnl_aCdefB16b48c4b),
aCdefB16b64c2b(dnnl_aCdefB16b64c2b),
aCdefB16b64c4b(dnnl_aCdefB16b64c4b),
BcdeA16a32b2a(dnnl_BcdeA16a32b2a),
BcdeA16a32b4a(dnnl_BcdeA16a32b4a),
BcdeA16a48b2a(dnnl_BcdeA16a48b2a),
BcdeA16a48b4a(dnnl_BcdeA16a48b4a),
BcdeA16a64b2a(dnnl_BcdeA16a64b2a),
BcdeA16a64b4a(dnnl_BcdeA16a64b4a),
aCdefb32c(dnnl_aCdefb32c),
aCdefB32c2b(dnnl_aCdefB32c2b),
aCdefB32c4b(dnnl_aCdefB32c4b),
aCdefb48c(dnnl_aCdefb48c),
aCdefB48c2b(dnnl_aCdefB48c2b),
aCdefB48c4b(dnnl_aCdefB48c4b),
aCdefb64c(dnnl_aCdefb64c),
aCdefB64c2b(dnnl_aCdefB64c2b),
aCdefB64c4b(dnnl_aCdefB64c4b),
Bcdea32b(dnnl_Bcdea32b),
BcdeA32b2a(dnnl_BcdeA32b2a),
BcdeA32b4a(dnnl_BcdeA32b4a),
Bcdea48b(dnnl_Bcdea48b),
BcdeA48b2a(dnnl_BcdeA48b2a),
BcdeA48b4a(dnnl_BcdeA48b4a),
Bcdea64b(dnnl_Bcdea64b),
BcdeA64b2a(dnnl_BcdeA64b2a),
BcdeA64b4a(dnnl_BcdeA64b4a),
Bca32b(dnnl_Bca32b),
BcA32b2a(dnnl_BcA32b2a),
BcA32b4a(dnnl_BcA32b4a),
Bca48b(dnnl_Bca48b),
BcA48b2a(dnnl_BcA48b2a),
BcA48b4a(dnnl_BcA48b4a),
Bca64b(dnnl_Bca64b),
BcA64b2a(dnnl_BcA64b2a),
BcA64b4a(dnnl_BcA64b4a),
aCdb32c(dnnl_aCdb32c),
aCdB32c2b(dnnl_aCdB32c2b),
aCdB32c4b(dnnl_aCdB32c4b),
aCdb48c(dnnl_aCdb48c),
aCdB48c2b(dnnl_aCdB48c2b),
aCdB48c4b(dnnl_aCdB48c4b),
aCdb64c(dnnl_aCdb64c),
aCdB64c2b(dnnl_aCdB64c2b),
aCdB64c4b(dnnl_aCdB64c4b),
BcA16a16b2a(dnnl_BcA16a16b2a),
BcA16a16b4a(dnnl_BcA16a16b4a),
BcdA16a16b2a(dnnl_BcdA16a16b2a),
BcdA16a16b4a(dnnl_BcdA16a16b4a),
BcdeA16a16b2a(dnnl_BcdeA16a16b2a),
BcdeA16a16b4a(dnnl_BcdeA16a16b4a),
aCdB16b16c2b(dnnl_aCdB16b16c2b),
aCdB16b16c4b(dnnl_aCdB16b16c4b),
aCdeB16b16c2b(dnnl_aCdeB16b16c2b),
aCdeB16b16c4b(dnnl_aCdeB16b16c4b),
aCdefB16b16c2b(dnnl_aCdefB16b16c2b),
aCdefB16b16c4b(dnnl_aCdefB16b16c4b),
BcA16a32b2a(dnnl_BcA16a32b2a),
BcA16a32b4a(dnnl_BcA16a32b4a),
BcA16a48b2a(dnnl_BcA16a48b2a),
BcA16a48b4a(dnnl_BcA16a48b4a),
BcA16a64b2a(dnnl_BcA16a64b2a),
BcA16a64b4a(dnnl_BcA16a64b4a),
aCdB16b32c2b(dnnl_aCdB16b32c2b),
aCdB16b32c4b(dnnl_aCdB16b32c4b),
aCdB16b48c2b(dnnl_aCdB16b48c2b),
aCdB16b48c4b(dnnl_aCdB16b48c4b),
aCdB16b64c2b(dnnl_aCdB16b64c2b),
aCdB16b64c4b(dnnl_aCdB16b64c4b),
BcdA16a32b2a(dnnl_BcdA16a32b2a),
BcdA16a32b4a(dnnl_BcdA16a32b4a),
BcdA16a48b2a(dnnl_BcdA16a48b2a),
BcdA16a48b4a(dnnl_BcdA16a48b4a),
BcdA16a64b2a(dnnl_BcdA16a64b2a),
BcdA16a64b4a(dnnl_BcdA16a64b4a),
aCdeB16b32c2b(dnnl_aCdeB16b32c2b),
aCdeB16b32c4b(dnnl_aCdeB16b32c4b),
aCdeB16b48c2b(dnnl_aCdeB16b48c2b),
aCdeB16b48c4b(dnnl_aCdeB16b48c4b),
aCdeB16b64c2b(dnnl_aCdeB16b64c2b),
aCdeB16b64c4b(dnnl_aCdeB16b64c4b),
Bca16b(dnnl_Bca16b),
BcA16b2a(dnnl_BcA16b2a),
BcA16b4a(dnnl_BcA16b4a),
Bcda16b(dnnl_Bcda16b),
BcdA16b2a(dnnl_BcdA16b2a),
BcdA16b4a(dnnl_BcdA16b4a),
Bcdea16b(dnnl_Bcdea16b),
BcdeA16b2a(dnnl_BcdeA16b2a),
BcdeA16b4a(dnnl_BcdeA16b4a),
aCdb16c(dnnl_aCdb16c),
aCdB16c2b(dnnl_aCdB16c2b),
aCdB16c4b(dnnl_aCdB16c4b),
aCdeb16c(dnnl_aCdeb16c),
aCdeB16c2b(dnnl_aCdeB16c2b),
aCdeB16c4b(dnnl_aCdeB16c4b),
aCdefb16c(dnnl_aCdefb16c),
aCdefB16c2b(dnnl_aCdefB16c2b),
aCdefB16c4b(dnnl_aCdefB16c4b),
Bcda32b(dnnl_Bcda32b),
BcdA32b2a(dnnl_BcdA32b2a),
BcdA32b4a(dnnl_BcdA32b4a),
Bcda48b(dnnl_Bcda48b),
BcdA48b2a(dnnl_BcdA48b2a),
BcdA48b4a(dnnl_BcdA48b4a),
Bcda64b(dnnl_Bcda64b),
BcdA64b2a(dnnl_BcdA64b2a),
BcdA64b4a(dnnl_BcdA64b4a),
aCdeb32c(dnnl_aCdeb32c),
aCdeB32c2b(dnnl_aCdeB32c2b),
aCdeB32c4b(dnnl_aCdeB32c4b),
aCdeb48c(dnnl_aCdeb48c),
aCdeB48c2b(dnnl_aCdeB48c2b),
aCdeB48c4b(dnnl_aCdeB48c4b),
aCdeb64c(dnnl_aCdeb64c),
aCdeB64c2b(dnnl_aCdeB64c2b),
aCdeB64c4b(dnnl_aCdeB64c4b),
NChw16n32c(dnnl_NChw16n32c),
goIw4i(dnnl_goIw4i),
goIw32i(dnnl_goIw32i),
goIhw4i(dnnl_goIhw4i),
goIhw32i(dnnl_goIhw32i),
goIdhw4i(dnnl_goIdhw4i),
goIdhw32i(dnnl_goIdhw32i),
cab(dnnl_cab),
cdab(dnnl_cdab),
cdeab(dnnl_cdeab),
woi(dnnl_woi),
hwoi(dnnl_hwoi),
dhwoi(dnnl_dhwoi),
Owi24o(dnnl_Owi24o),
Ohwi24o(dnnl_Ohwi24o),
Odhwi24o(dnnl_Odhwi24o),
gOwi24o(dnnl_gOwi24o),
gOhwi24o(dnnl_gOhwi24o),
gOdhwi24o(dnnl_gOdhwi24o),
OwI24o2i(dnnl_OwI24o2i),
OhwI24o2i(dnnl_OhwI24o2i),
OdhwI24o2i(dnnl_OdhwI24o2i),
gOwI24o2i(dnnl_gOwI24o2i),
gOhwI24o2i(dnnl_gOhwI24o2i),
gOdhwI24o2i(dnnl_gOdhwI24o2i),
OwI24o4i(dnnl_OwI24o4i),
OhwI24o4i(dnnl_OhwI24o4i),
OdhwI24o4i(dnnl_OdhwI24o4i),
gOwI24o4i(dnnl_gOwI24o4i),
gOhwI24o4i(dnnl_gOhwI24o4i),
gOdhwI24o4i(dnnl_gOdhwI24o4i),
OI8i32o(dnnl_OI8i32o),
OIw8i32o(dnnl_OIw8i32o),
OwI8i32o(dnnl_OwI8i32o),
OIhw8i32o(dnnl_OIhw8i32o),
OhwI8i32o(dnnl_OhwI8i32o),
OIdhw8i32o(dnnl_OIdhw8i32o),
OdhwI8i32o(dnnl_OdhwI8i32o),
OI8i24o(dnnl_OI8i24o),
OIw8i24o(dnnl_OIw8i24o),
OwI8i24o(dnnl_OwI8i24o),
OIhw8i24o(dnnl_OIhw8i24o),
OhwI8i24o(dnnl_OhwI8i24o),
OIdhw8i24o(dnnl_OIdhw8i24o),
OdhwI8i24o(dnnl_OdhwI8i24o),
OI8i16o(dnnl_OI8i16o),
OIw8i16o(dnnl_OIw8i16o),
OwI8i16o(dnnl_OwI8i16o),
OIhw8i16o(dnnl_OIhw8i16o),
OhwI8i16o(dnnl_OhwI8i16o),
OIdhw8i16o(dnnl_OIdhw8i16o),
OdhwI8i16o(dnnl_OdhwI8i16o),
OI8i8o(dnnl_OI8i8o),
AB4b8a4b(dnnl_AB4b8a4b),
AB4b24a4b(dnnl_AB4b24a4b),
ABc4b8a4b(dnnl_ABc4b8a4b),
AcB4b8a4b(dnnl_AcB4b8a4b),
ABc4b24a4b(dnnl_ABc4b24a4b),
AcB4b24a4b(dnnl_AcB4b24a4b),
ABcd4b8a4b(dnnl_ABcd4b8a4b),
AcdB4b8a4b(dnnl_AcdB4b8a4b),
ABcd4b24a4b(dnnl_ABcd4b24a4b),
AcdB4b24a4b(dnnl_AcdB4b24a4b),
ABcde4b8a4b(dnnl_ABcde4b8a4b),
AcdeB4b8a4b(dnnl_AcdeB4b8a4b),
ABcde4b24a4b(dnnl_ABcde4b24a4b),
AcdeB4b24a4b(dnnl_AcdeB4b24a4b),
Bca8b(dnnl_Bca8b),
BcA8b2a(dnnl_BcA8b2a),
Bcda8b(dnnl_Bcda8b),
BcdA8b2a(dnnl_BcdA8b2a),
Bcdea8b(dnnl_Bcdea8b),
BcdeA8b2a(dnnl_BcdeA8b2a),
aCdb8c(dnnl_aCdb8c),
aCdB8c2b(dnnl_aCdB8c2b),
aCdeb8c(dnnl_aCdeb8c),
aCdeB8c2b(dnnl_aCdeB8c2b),
aCdefb8c(dnnl_aCdefb8c),
aCdefB8c2b(dnnl_aCdefB8c2b),
Bca24b(dnnl_Bca24b),
BcA24b2a(dnnl_BcA24b2a),
Bcda24b(dnnl_Bcda24b),
BcdA24b2a(dnnl_BcdA24b2a),
Bcdea24b(dnnl_Bcdea24b),
BcdeA24b2a(dnnl_BcdeA24b2a),
aCdb24c(dnnl_aCdb24c),
aCdB24c2b(dnnl_aCdB24c2b),
aCdeb24c(dnnl_aCdeb24c),
aCdeB24c2b(dnnl_aCdeB24c2b),
aCdefb24c(dnnl_aCdefb24c),
aCdefB24c2b(dnnl_aCdefB24c2b),
Iwo8i(dnnl_Iwo8i),
IwO8i2o(dnnl_IwO8i2o),
Iwo24i(dnnl_Iwo24i),
IwO24i2o(dnnl_IwO24i2o),
Ihwo8i(dnnl_Ihwo8i),
IhwO8i2o(dnnl_IhwO8i2o),
Ihwo24i(dnnl_Ihwo24i),
IhwO24i2o(dnnl_IhwO24i2o),
Idhwo8i(dnnl_Idhwo8i),
IdhwO8i2o(dnnl_IdhwO8i2o),
Idhwo24i(dnnl_Idhwo24i),
IdhwO24i2o(dnnl_IdhwO24i2o),
gIwo8i(dnnl_gIwo8i),
gIwO8i2o(dnnl_gIwO8i2o),
gIwo24i(dnnl_gIwo24i),
gIwO24i2o(dnnl_gIwO24i2o),
gIhwo8i(dnnl_gIhwo8i),
gIhwO8i2o(dnnl_gIhwO8i2o),
gIhwo24i(dnnl_gIhwo24i),
gIhwO24i2o(dnnl_gIhwO24i2o),
gIdhwo8i(dnnl_gIdhwo8i),
gIdhwO8i2o(dnnl_gIdhwO8i2o),
gIdhwo24i(dnnl_gIdhwo24i),
gIdhwO24i2o(dnnl_gIdhwO24i2o),
OhwI24o(dnnl_OhwI24o),
gOhwI24o(dnnl_gOhwI24o),
AB8b24a2b(dnnl_AB8b24a2b),
ABc8b24a2b(dnnl_ABc8b24a2b),
AcB8b24a2b(dnnl_AcB8b24a2b),
ABcd8b24a2b(dnnl_ABcd8b24a2b),
AcdB8b24a2b(dnnl_AcdB8b24a2b),
ABcde8b24a2b(dnnl_ABcde8b24a2b),
AcdeB8b24a2b(dnnl_AcdeB8b24a2b),
AB8b8a2b(dnnl_AB8b8a2b),
ABc8b8a2b(dnnl_ABc8b8a2b),
AcB8b8a2b(dnnl_AcB8b8a2b),
ABcd8b8a2b(dnnl_ABcd8b8a2b),
AcdB8b8a2b(dnnl_AcdB8b8a2b),
ABcde8b8a2b(dnnl_ABcde8b8a2b),
AcdeB8b8a2b(dnnl_AcdeB8b8a2b),
OI8i8o2i(dnnl_OI8i8o2i),
OI8i24o2i(dnnl_OI8i24o2i),
OIw8i8o2i(dnnl_OIw8i8o2i),
OwI8i8o2i(dnnl_OwI8i8o2i),
OIw8i24o2i(dnnl_OIw8i24o2i),
OwI8i24o2i(dnnl_OwI8i24o2i),
OIhw8i8o2i(dnnl_OIhw8i8o2i),
OhwI8i8o2i(dnnl_OhwI8i8o2i),
OIhw8i24o2i(dnnl_OIhw8i24o2i),
OhwI8i24o2i(dnnl_OhwI8i24o2i),
OIdhw8i8o2i(dnnl_OIdhw8i8o2i),
OdhwI8i8o2i(dnnl_OdhwI8i8o2i),
OIdhw8i24o2i(dnnl_OIdhw8i24o2i),
OdhwI8i24o2i(dnnl_OdhwI8i24o2i),
BcA8b4a(dnnl_BcA8b4a),
BcdA8b4a(dnnl_BcdA8b4a),
BcdeA8b4a(dnnl_BcdeA8b4a),
aCdB8c4b(dnnl_aCdB8c4b),
aCdeB8c4b(dnnl_aCdeB8c4b),
aCdefB8c4b(dnnl_aCdefB8c4b),
BcA24b4a(dnnl_BcA24b4a),
BcdA24b4a(dnnl_BcdA24b4a),
BcdeA24b4a(dnnl_BcdeA24b4a),
aCdB24c4b(dnnl_aCdB24c4b),
aCdeB24c4b(dnnl_aCdeB24c4b),
aCdefB24c4b(dnnl_aCdefB24c4b),
ABc16a4b(dnnl_ABc16a4b),
ABcd16a4b(dnnl_ABcd16a4b),
ABcde16a4b(dnnl_ABcde16a4b),
IwO8i4o(dnnl_IwO8i4o),
IwO24i4o(dnnl_IwO24i4o),
IhwO8i4o(dnnl_IhwO8i4o),
IhwO24i4o(dnnl_IhwO24i4o),
IdhwO8i4o(dnnl_IdhwO8i4o),
IdhwO24i4o(dnnl_IdhwO24i4o),
gIwO8i4o(dnnl_gIwO8i4o),
gIwO24i4o(dnnl_gIwO24i4o),
gIhwO8i4o(dnnl_gIhwO8i4o),
gIhwO24i4o(dnnl_gIhwO24i4o),
gIdhwO8i4o(dnnl_gIdhwO8i4o),
gIdhwO24i4o(dnnl_gIdhwO24i4o);
        /** The underlying native {@code dnnl_format_tag_t} value of this constant. */
        public final int value;
        private format_tag(int v) { this.value = v; }
        private format_tag(format_tag e) { this.value = e.value; }
        /** Returns the first declared constant whose {@link #value} equals this one's
         *  native value, or {@code this} if no declared constant matches. */
        public format_tag intern() { for (format_tag e : values()) if (e.value == value) return e; return this; }
        @Override public String toString() { return intern().name(); }
}
/** A memory descriptor. */
    @Name("desc") public static class desc extends dnnl_memory_desc_handle {
        // NOTE(review): this class appears to be JavaCPP-generated binding code
        // (see the @Name/@Cast/@ByRef/@StdVector annotations); prefer regenerating
        // from the C++ headers over hand-editing the declarations below.
        static { Loader.load(); }
        /** Constructs an empty descriptor wrapper; native state is allocated immediately. */
        public desc() { super((Pointer)null); allocate(); }
        private native void allocate();
        /** Copy constructor: allocates a native copy of {@code arg0}. */
        public desc(@Const @ByRef desc arg0) { super((Pointer)null); allocate(arg0); }
        private native void allocate(@Const @ByRef desc arg0);
        ///
        public desc(dnnl_memory_desc t, @Cast("bool") boolean weak/*=false*/) { super((Pointer)null); allocate(t, weak); }
        private native void allocate(dnnl_memory_desc t, @Cast("bool") boolean weak/*=false*/);
        public desc(dnnl_memory_desc t) { super((Pointer)null); allocate(t); }
        private native void allocate(dnnl_memory_desc t);
        /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
        public desc(Pointer p) { super(p); }
        /** Native array allocator. Access with {@link Pointer#position(long)}. */
        public desc(long size) { super((Pointer)null); allocateArray(size); }
        private native void allocateArray(long size);
        @Override public desc position(long position) {
            return (desc)super.position(position);
        }
        @Override public desc getPointer(long i) {
            return new desc((Pointer)this).offsetAddress(i);
        }
        /** Constructs a zero (empty) memory descriptor. Such a memory
         * descriptor can be used to indicate absence of an argument. */
        /** Constructs a memory descriptor.
         *
         * \note
         * The logical order of dimensions corresponds to the {@code abc...}
         * format tag, and the physical meaning of the dimensions depends
         * both on the primitive that would operate on this memory and
         * the operation context.
         *
         * @param adims Tensor dimensions.
         * @param adata_type Data precision/type.
         * @param aformat_tag Memory format tag.
         * @param allow_empty A flag signifying whether construction is
         * allowed to fail without throwing an exception. In this case a
         * zero memory descriptor will be constructed. This flag is
         * optional and defaults to false. */
        ///
        ///
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, memory.data_type adata_type, memory.format_tag aformat_tag,
                        @Cast("bool") boolean allow_empty/*=false*/) { super((Pointer)null); allocate(adims, adata_type, aformat_tag, allow_empty); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, memory.data_type adata_type, memory.format_tag aformat_tag,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, memory.data_type adata_type, memory.format_tag aformat_tag) { super((Pointer)null); allocate(adims, adata_type, aformat_tag); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, memory.data_type adata_type, memory.format_tag aformat_tag);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, memory.data_type adata_type, memory.format_tag aformat_tag,
                        @Cast("bool") boolean allow_empty/*=false*/) { super((Pointer)null); allocate(adims, adata_type, aformat_tag, allow_empty); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, memory.data_type adata_type, memory.format_tag aformat_tag,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, memory.data_type adata_type, memory.format_tag aformat_tag) { super((Pointer)null); allocate(adims, adata_type, aformat_tag); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, memory.data_type adata_type, memory.format_tag aformat_tag);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, memory.data_type adata_type, memory.format_tag aformat_tag,
                        @Cast("bool") boolean allow_empty/*=false*/) { super((Pointer)null); allocate(adims, adata_type, aformat_tag, allow_empty); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, memory.data_type adata_type, memory.format_tag aformat_tag,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, memory.data_type adata_type, memory.format_tag aformat_tag) { super((Pointer)null); allocate(adims, adata_type, aformat_tag); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, memory.data_type adata_type, memory.format_tag aformat_tag);
        /** Constructs a memory descriptor by strides.
         *
         * \note
         * The logical order of dimensions corresponds to the {@code abc...}
         * format tag, and the physical meaning of the dimensions depends
         * both on the primitive that would operate on this memory and
         * the operation context.
         *
         * @param adims Tensor dimensions.
         * @param adata_type Data precision/type.
         * @param strides Strides for each dimension.
         * @param allow_empty A flag signifying whether construction is
         * allowed to fail without throwing an exception. In this case a
         * zero memory descriptor will be constructed. This flag is
         * optional and defaults to false. */
        ///
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer strides,
                        @Cast("bool") boolean allow_empty/*=false*/) { super((Pointer)null); allocate(adims, adata_type, strides, allow_empty); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer strides,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer strides) { super((Pointer)null); allocate(adims, adata_type, strides); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer strides);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer strides,
                        @Cast("bool") boolean allow_empty/*=false*/) { super((Pointer)null); allocate(adims, adata_type, strides, allow_empty); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer strides,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer strides) { super((Pointer)null); allocate(adims, adata_type, strides); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer strides);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] strides,
                        @Cast("bool") boolean allow_empty/*=false*/) { super((Pointer)null); allocate(adims, adata_type, strides, allow_empty); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] strides,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] strides) { super((Pointer)null); allocate(adims, adata_type, strides); }
        private native void allocate(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, memory.data_type adata_type, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] strides);
// #ifdef DNNL_EXPERIMENTAL_SPARSE
// #endif
        /** Construct a memory descriptor from a C API ::dnnl_memory_desc_t
         * handle. The resulting handle is not weak and the C handle will be
         * destroyed during the destruction of the C++ object.
         *
         * @param md The C API memory descriptor. */
        /** Construct a memory descriptor from a binary blob.
         *
         * @param blob A binary blob previously queried from a memory descriptor. */
        public desc(@Cast("uint8_t*") @StdVector BytePointer blob) { super((Pointer)null); allocate(blob); }
        private native void allocate(@Cast("uint8_t*") @StdVector BytePointer blob);
        public desc(@Cast("uint8_t*") @StdVector ByteBuffer blob) { super((Pointer)null); allocate(blob); }
        private native void allocate(@Cast("uint8_t*") @StdVector ByteBuffer blob);
        public desc(@Cast("uint8_t*") @StdVector byte[] blob) { super((Pointer)null); allocate(blob); }
        private native void allocate(@Cast("uint8_t*") @StdVector byte[] blob);
        /** Constructs a memory descriptor for a region inside an area
         * described by this memory descriptor.
         *
         * @param adims Sizes of the region.
         * @param offsets Offsets to the region from the encompassing
         *     memory object in each dimension.
         * @param allow_empty A flag signifying whether construction is
         *     allowed to fail without throwing an exception. In this case a
         *     zero memory descriptor will be returned. This flag is optional
         *     and defaults to false.
         * @return A memory descriptor for the region. */
        ///
        ///
        ///
        ///
        public native @ByVal desc submemory_desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer offsets,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc submemory_desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer offsets);
        public native @ByVal desc submemory_desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer offsets,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc submemory_desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer offsets);
        public native @ByVal desc submemory_desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] offsets,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc submemory_desc(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, @Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] offsets);
        /** Constructs a memory descriptor by reshaping an existing one. The
         * new memory descriptor inherits the data type. This operation is
         * valid only for memory descriptors that have format_kind set to
         * #dnnl::memory::format_kind::blocked or
         * #dnnl::memory::format_kind::any.
         *
         * The operation ensures that the transformation of the physical memory
         * format corresponds to the transformation of the logical dimensions.
         * If such transformation is impossible, the function either throws an
         * exception (default) or returns a zero memory descriptor depending on
         * the {@code allow_empty} flag.
         *
         * The reshape operation can be described as a combination of the
         * following basic operations:
         * 1. Add a dimension of size {@code 1}. This is always possible.
         * 2. Remove a dimension of size {@code 1}. This is possible only if the
         *    dimension has no padding (i.e.
         *    {@code padded_dims[dim] == dims[dim] && dims[dim] == 1}).
         * 3. Split a dimension into multiple ones. This is possible only if
         *    the product of all tensor dimensions stays constant and the
         *    dimension being split does not have padding (i.e.
         *    {@code padded_dims[dim] = dims[dim]}).
         * 4. Join multiple consecutive dimensions into a single one. As in
         *    the cases above, this requires that the dimensions do not have
         *    padding and that the memory format is such that in physical
         *    memory these dimensions are dense and have the same order as
         *    their logical counterparts. This also assumes that these
         *    dimensions are not blocked.
         *    - Here, 'dense' means:
         *      {@code stride for dim[i] == (stride for dim[i + 1]) * dim[i + 1]};
         *    - And 'same order' means:
         *      {@code i < j} if and only if {@code stride for dim[j] <= stride for dim[i]}.
         *
         * \warning
         * Some combinations of physical memory layout and/or offsets or
         * dimensions may result in a failure to make a reshape.
         *
         * @param adims New dimensions. The product of dimensions must
         * remain constant.
         * @param allow_empty A flag signifying whether construction is
         * allowed to fail without throwing an exception. In this case a
         * zero memory descriptor will be returned. This flag is optional
         * and defaults to false.
         * @return A new memory descriptor with new dimensions. */
        ///
        ///
        ///
        ///
        ///
        ///
        public native @ByVal desc reshape(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims, @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc reshape(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongPointer adims);
        public native @ByVal desc reshape(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims, @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc reshape(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef LongBuffer adims);
        public native @ByVal desc reshape(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims, @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc reshape(@Const @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByRef long[] adims);
        /** Constructs a memory descriptor by permuting axes in an existing
         * one.
         *
         * The physical memory layout representation is adjusted accordingly
         * to maintain the consistency between the logical and physical parts
         * of the memory descriptor. The new memory descriptor inherits the
         * data type.
         *
         * This operation is
         * valid only for memory descriptors that have format_kind set to
         * #dnnl::memory::format_kind::blocked or
         * #dnnl::memory::format_kind::any.
         *
         * The logical axes will be permuted in the following manner:
         * <pre>{@code
         * for (i = 0; i < get_ndims(); i++)
         *     new_desc.dims()[permutation[i]] = dims()[i];
         * }</pre>
         *
         * Example:
         * <pre>{@code
         * std::vector<int> permutation = {1, 0}; // swap the first and
         *                                        // the second axes
         * dnnl::memory::desc in_md(
         *         {2, 3}, data_type, memory::format_tag::ab);
         * dnnl::memory::desc expect_out_md(
         *         {3, 2}, data_type, memory::format_tag::ba);
         *
         * assert(in_md.permute_axes(permutation) == expect_out_md);
         * }</pre>
         *
         * @param permutation Axes permutation.
         * @param allow_empty A flag signifying whether construction is
         * allowed to fail without throwing an exception. In this case a
         * zero memory descriptor will be returned. This flag is optional
         * and defaults to false.
         * @return A new memory descriptor with new dimensions. */
        ///
        public native @ByVal desc permute_axes(@StdVector IntPointer permutation,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc permute_axes(@StdVector IntPointer permutation);
        public native @ByVal desc permute_axes(@StdVector IntBuffer permutation,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc permute_axes(@StdVector IntBuffer permutation);
        public native @ByVal desc permute_axes(@StdVector int[] permutation,
                        @Cast("bool") boolean allow_empty/*=false*/);
        public native @ByVal desc permute_axes(@StdVector int[] permutation);
        /** Returns a number of dimensions of the memory descriptor.
         *
         * @return A number of dimensions. */
        ///
        public native int get_ndims();
        /** Returns padded dimensions of the memory descriptor.
         *
         * @return A copy of the padded dimensions vector. */
        ///
        public native @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByVal LongPointer get_padded_dims();
        /** Returns padded offsets of the memory descriptor.
         *
         * @return A copy of the padded offsets vector. */
        ///
        public native @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByVal LongPointer get_padded_offsets();
        /** Returns a submemory offset of the memory descriptor.
         *
         * @return A submemory offset. */
        ///
        ///
        public native @Cast("dnnl::memory::dim") long get_submemory_offset();
        /** Returns strides of the memory descriptor.
         *
         * \note
         * This API is only applicable to memory descriptors with format
         * kind #dnnl_blocked.
         *
         * @return A copy of the strides vector, or an empty
         *     #dnnl::memory::dims if the memory descriptor does not
         *     have strides. */
        ///
        ///
        public native @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByVal LongPointer get_strides();
        /** Returns a number of inner blocks of the memory descriptor.
         *
         * \note
         * This API is only applicable to memory descriptors with format
         * kind #dnnl_blocked.
         *
         * @return A number of inner blocks. */
        ///
        ///
        public native int get_inner_nblks();
        /** Returns inner blocks of the memory descriptor.
         *
         * \note
         * This API is only applicable to memory descriptors with format
         * kind #dnnl_blocked.
         *
         * @return A copy of the inner blocks vector, or an empty
         *     #dnnl::memory::dims if the memory descriptor does not
         *     have inner blocks. */
        ///
        ///
        public native @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByVal LongPointer get_inner_blks();
        /** Returns inner indices of the memory descriptor.
         *
         * \note
         * This API is only applicable to memory descriptors with format
         * kind #dnnl_blocked.
         *
         * @return A copy of the inner indices vector, or an empty
         *     #dnnl::memory::dims if the memory descriptor does not
         *     have inner indices. */
        ///
        public native @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByVal LongPointer get_inner_idxs();
// #ifdef DNNL_EXPERIMENTAL_SPARSE
// #else
        /** Returns the data type of the memory descriptor.
         *
         * @return The data type. */
        ///
        public native memory.data_type get_data_type();
// #endif
        /** Returns the format kind of the memory descriptor.
         *
         * @return the format kind. */
        ///
        public native memory.format_kind get_format_kind();
        /** Returns dimensions of the memory descriptor.
         *
         * Potentially expensive due to the data copy involved.
         * @return A copy of the dimensions vector. */
        public native @Cast({"dnnl_dim_t*", "std::vector<dnnl_dim_t>&"}) @StdVector("dnnl_dim_t") @ByVal LongPointer get_dims();
// #ifdef DNNL_EXPERIMENTAL_SPARSE
// #else
        /** Returns size of the memory descriptor in bytes.
         * @return The number of bytes required to allocate a memory buffer
         *     for the memory object described by this memory descriptor
         *     including the padding area. */
        public native @Cast("size_t") long get_size();
// #endif
        /** Returns a binary blob associated with the given memory descriptor
         * @return The memory descriptor blob associated with the memory descriptor */
        public native @Cast("uint8_t*") @StdVector BytePointer get_blob();
        /** Checks whether the memory descriptor is zero (empty).
         * @return \c true if the memory descriptor describes an empty
         *     memory and \c false otherwise. */
        public native @Cast("bool") boolean is_zero();
        /** An equality operator.
         * @param other Another memory descriptor.
         * @return Whether this and the other memory descriptors have
         *     the same format tag, dimensions, strides, blocking, etc. */
        public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef desc other);
        /** An inequality operator.
         * @param other Another memory descriptor.
         * @return Whether this and the other memory descriptors describe
         *     different memory. */
        public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef desc other);
    }
/** Default constructor.
*
* Constructs an empty memory object, which can be used to indicate
* absence of a parameter. */
// #ifdef DNNL_EXPERIMENTAL_SPARSE
// #else
/** Constructs a memory object.
*
* Unless \p handle is equal to #DNNL_MEMORY_NONE, the constructed memory
* object will have the underlying buffer set. In this case, the buffer
* will be initialized as if #dnnl::memory::set_data_handle() had been
* called.
*
* @see memory::set_data_handle()
*
* @param md Memory descriptor.
* @param aengine Engine to store the data on.
* @param handle Handle of the memory buffer to use.
* - A pointer to the user-allocated buffer. In this case the library
* doesn't own the buffer.
* - The #DNNL_MEMORY_ALLOCATE special value. Instructs the library to
* allocate the buffer for the memory object. In this case the
* library owns the buffer.
* - #DNNL_MEMORY_NONE to create dnnl::memory without an underlying
* buffer. */
///
///
    public memory(@Const @ByRef org.bytedeco.dnnl.memory.desc md, @Const @ByRef engine aengine, Pointer handle) { super((Pointer)null); allocate(md, aengine, handle); }
    // Native allocator invoked by the constructor above; see its Javadoc for handle semantics.
    private native void allocate(@Const @ByRef org.bytedeco.dnnl.memory.desc md, @Const @ByRef engine aengine, Pointer handle);
    /** Constructs a memory object.
     *
     * The underlying buffer for the memory will be allocated by the library.
     *
     * @param md Memory descriptor.
     * @param aengine Engine to store the data on. */
    public memory(@Const @ByRef org.bytedeco.dnnl.memory.desc md, @Const @ByRef engine aengine) { super((Pointer)null); allocate(md, aengine); }
    // Native allocator invoked by the constructor above.
    private native void allocate(@Const @ByRef org.bytedeco.dnnl.memory.desc md, @Const @ByRef engine aengine);
// #endif
    /** Returns the associated memory descriptor (returned by value, per {@code @ByVal}). */
    public native @ByVal org.bytedeco.dnnl.memory.desc get_desc();
    /** Returns the engine the memory object's data is stored on. */
    ///
    public native @ByVal engine get_engine();
// #ifdef DNNL_EXPERIMENTAL_SPARSE
// #else
    /** Returns the underlying memory buffer.
     *
     * On the CPU engine, or when using USM, this is a pointer to the
     * allocated memory.
     *
     * @return the raw data handle previously set or allocated for this object. */
    ///
    public native Pointer get_data_handle();
    /** Sets the underlying memory buffer.
     *
     * @param handle Memory buffer to use. On the CPU engine or when USM is
     * used, the memory buffer is a pointer to the actual data. For OpenCL
     * it is a cl_mem. It must have at least
     * #dnnl::memory::desc::get_size() bytes allocated.
     * @see #get_data_handle() */
    ///
    ///
    ///
    ///
    ///
    public native void set_data_handle(Pointer handle);
    /** Maps a memory object and returns a host-side pointer to a memory
     * buffer with a copy of its contents.
     *
     * Mapping enables read/write directly from/to the memory contents for
     * engines that do not support direct memory access.
     *
     * Mapping is an exclusive operation - a memory object cannot be used in
     * other operations until it is unmapped via #dnnl::memory::unmap_data()
     * call.
     *
     * \note
     * Any primitives working with the memory should be completed before
     * the memory is mapped. Use #dnnl::stream::wait() to synchronize the
     * corresponding execution stream.
     *
     * \note
     * The map_data and unmap_data functions are provided mainly for
     * debug and testing purposes and their performance may be suboptimal.
     *
     * \tparam T Data type to return a pointer to.
     * @return Pointer to the mapped memory. */
    // NOTE(review): the templated map_data() documented above is not exposed
    // in this Java binding; only unmap_data() is generated here.
    /** Unmaps a memory object and writes back any changes made to the
     * previously mapped memory buffer.
     *
     * \note
     * The map_data and unmap_data functions are provided mainly for
     * debug and testing purposes and their performance may be
     * suboptimal.
     *
     * @param mapped_ptr A pointer previously returned by
     * #dnnl::memory::map_data(). */
    public native void unmap_data(Pointer mapped_ptr);
// #endif
    /** Converts a {@link data_type} to the corresponding native {@code dnnl_data_type_t} value. */
    public static native @Cast("dnnl_data_type_t") int convert_to_c(data_type adata_type);
    /** Converts a {@link format_tag} to the corresponding native {@code dnnl_format_tag_t} value. */
    public static native @Cast("dnnl_format_tag_t") int convert_to_c(format_tag format);
}
package net.i2p.crypto;
/*
* As pulled from https://github.com/nahi/siphash-java-inline
* Last commit was https://github.com/nahi/siphash-java-inline/commit/5be5c84851a28f800fcac66ced658bdbd01f31ef
* 2012-11-06
*
* Copyright 2012 Hiroshi Nakamura <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* SipHash implementation with hand inlining the SIPROUND.
*
* To know details about SipHash, see;
* "a fast short-input PRF" https://www.131002.net/siphash/
*
* SIPROUND is defined in siphash24.c that can be downloaded from the above
* site. Following license notice is subject to change based on the licensing
* policy of siphash24.c.
*
* I2P mods: add off/len version
*
* For constant keys see net.i2p.util.SipHash
*
* @since 0.9.5, Moved to net.i2p.crypto and public since 0.9.27
*/
public final class SipHashInline {

    /** Static-only utility; not instantiable. @since 0.9.27 */
    private SipHashInline() {}

    /**
     * SipHash-2-4 of the whole array.
     *
     * @param k0 the first 8 bytes of the key
     * @param k1 the last 8 bytes of the key
     */
    public static long hash24(long k0, long k1, byte[] data) {
        return hash24(k0, k1, data, 0, data.length);
    }

    /**
     * SipHash-2-4 of {@code data[off..off+len)}.
     *
     * @param k0 the first 8 bytes of the key
     * @param k1 the last 8 bytes of the key
     */
    public static long hash24(long k0, long k1, byte[] data, int off, int len) {
        // Four-lane internal state, initialized from the key and the
        // SipHash magic constants ("somepseu", "dorandom", ...).
        long[] v = {
            0x736f6d6570736575L ^ k0,
            0x646f72616e646f6dL ^ k1,
            0x6c7967656e657261L ^ k0,
            0x7465646279746573L ^ k1
        };

        // Compression: each full 8-byte block, packed little-endian,
        // is absorbed with two SipRounds.
        int tail = off + (len & ~7);
        for (int i = off; i < tail; i += 8) {
            long m = (data[i]     & 0xffL)
                   | (data[i + 1] & 0xffL) << 8
                   | (data[i + 2] & 0xffL) << 16
                   | (data[i + 3] & 0xffL) << 24
                   | (data[i + 4] & 0xffL) << 32
                   | (data[i + 5] & 0xffL) << 40
                   | (data[i + 6] & 0xffL) << 48
                   | (data[i + 7] & 0xffL) << 56;
            v[3] ^= m;
            sipRound(v);
            sipRound(v);
            v[0] ^= m;
        }

        // Final block: the 0-7 leftover bytes (little-endian) with the
        // input length (mod 256) in the most significant byte.
        long m = 0;
        for (int i = off + len - 1; i >= tail; i--) {
            m = (m << 8) | (data[i] & 0xffL);
        }
        m |= (long) len << 56;
        v[3] ^= m;
        sipRound(v);
        sipRound(v);
        v[0] ^= m;

        // Finalization: xor 0xff into v2, then four more rounds.
        v[2] ^= 0xff;
        sipRound(v);
        sipRound(v);
        sipRound(v);
        sipRound(v);
        return v[0] ^ v[1] ^ v[2] ^ v[3];
    }

    /** One SIPROUND over the 4-lane state, as defined in siphash24.c. */
    private static void sipRound(long[] v) {
        v[0] += v[1]; v[1] = Long.rotateLeft(v[1], 13); v[1] ^= v[0]; v[0] = Long.rotateLeft(v[0], 32);
        v[2] += v[3]; v[3] = Long.rotateLeft(v[3], 16); v[3] ^= v[2];
        v[0] += v[3]; v[3] = Long.rotateLeft(v[3], 21); v[3] ^= v[0];
        v[2] += v[1]; v[1] = Long.rotateLeft(v[1], 17); v[1] ^= v[2]; v[2] = Long.rotateLeft(v[2], 32);
    }
}
package org.httpkit.server;
import static clojure.lang.Keyword.intern;
import static org.httpkit.HttpUtils.HttpEncode;
import static org.httpkit.HttpVersion.HTTP_1_0;
import static org.httpkit.HttpVersion.HTTP_1_1;
import static org.httpkit.server.ClojureRing.BODY;
import static org.httpkit.server.ClojureRing.HEADERS;
import static org.httpkit.server.ClojureRing.buildRequestMap;
import static org.httpkit.server.ClojureRing.getStatus;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.httpkit.HeaderMap;
import org.httpkit.PrefixThreadFactory;
import org.httpkit.logger.ContextLogger;
import org.httpkit.logger.EventNames;
import org.httpkit.logger.EventLogger;
import org.httpkit.server.Frame.TextFrame;
import org.httpkit.server.Frame.BinaryFrame;
import org.httpkit.server.Frame.PingFrame;
import org.httpkit.server.Frame.PongFrame;
import clojure.lang.AFn;
import clojure.lang.IFn;
import clojure.lang.IPersistentMap;
import clojure.lang.Keyword;
import clojure.lang.PersistentArrayMap;
import clojure.lang.PersistentHashMap;
import clojure.lang.ITransientMap;
@SuppressWarnings({"rawtypes", "unchecked"})
class ClojureRing {

    // Keys of the Ring request map, per the Ring spec.
    static final Keyword SERVER_PORT = intern("server-port");
    static final Keyword SERVER_NAME = intern("server-name");
    static final Keyword REMOTE_ADDR = intern("remote-addr");
    static final Keyword URI = intern("uri");
    static final Keyword QUERY_STRING = intern("query-string");
    static final Keyword SCHEME = intern("scheme");
    static final Keyword REQUEST_METHOD = intern("request-method");
    static final Keyword HEADERS = intern("headers");
    static final Keyword CONTENT_TYPE = intern("content-type");
    static final Keyword CONTENT_LENGTH = intern("content-length");
    static final Keyword CHARACTER_ENCODING = intern("character-encoding");
    static final Keyword BODY = intern("body");
    static final Keyword WEBSOCKET = intern("websocket?");
    static final Keyword ASYC_CHANNEL = intern("async-channel");
    static final Keyword START_TIME = intern("start-time");
    static final Keyword HTTP = intern("http");
    static final Keyword STATUS = intern("status");

    /**
     * Reads the :status entry of a Ring response map.
     * Accepts Long (the usual Clojure integer type) or Integer; anything
     * else (including a missing entry) defaults to 200.
     */
    public static int getStatus(Map<Keyword, Object> resp) {
        Object s = resp.get(STATUS);
        if (s instanceof Long) {
            return ((Long) s).intValue();
        }
        if (s instanceof Integer) {
            return (Integer) s;
        }
        return 200;
    }

    /** Builds the immutable Ring request map for a parsed HTTP request. */
    public static IPersistentMap buildRequestMap(HttpRequest req) {
        // Assemble via a transient map, then freeze it.
        ITransientMap m = PersistentHashMap.EMPTY.asTransient();
        m = m.assoc(SERVER_PORT, req.serverPort);
        m = m.assoc(SERVER_NAME, req.serverName);
        m = m.assoc(REMOTE_ADDR, req.getRemoteAddr());
        m = m.assoc(URI, req.uri);
        m = m.assoc(QUERY_STRING, req.queryString);
        m = m.assoc(SCHEME, HTTP); // only http is supported
        m = m.assoc(ASYC_CHANNEL, req.channel);
        m = m.assoc(WEBSOCKET, req.isWebSocket);
        m = m.assoc(REQUEST_METHOD, req.method.KEY);
        m = m.assoc(START_TIME, req.startTime);
        // header keys are already lower-cased, as the Ring spec requires
        m = m.assoc(HEADERS, PersistentArrayMap.create(req.headers));
        m = m.assoc(CONTENT_TYPE, req.contentType);
        m = m.assoc(CONTENT_LENGTH, req.contentLength);
        m = m.assoc(CHARACTER_ENCODING, req.charset);
        m = m.assoc(BODY, req.getBody());
        return m.persistent();
    }
}
class ErrorResponse {
    /** Shared header set for plain-text error responses (404/500/503). */
    static final HeaderMap headers = new HeaderMap();
    static {
        headers.put("Content-Type", "text/plain; charset=utf-8");
    }
}
@SuppressWarnings({"rawtypes", "unchecked"})
class HttpHandler implements Runnable {

    final HttpRequest req;
    final RespCallback cb;
    final IFn handler;
    final boolean isRingAsync;
    final ContextLogger<String, Throwable> errorLogger;
    final EventLogger<String> eventLogger;
    final EventNames eventNames;
    final String serverHeader;

    public HttpHandler(HttpRequest req, RespCallback cb, IFn handler, boolean isRingAsync,
                       ContextLogger<String, Throwable> errorLogger, EventLogger<String> eventLogger, EventNames eventNames, String serverHeader) {
        this.req = req;
        this.cb = cb;
        this.handler = handler;
        this.isRingAsync = isRingAsync;
        this.errorLogger = errorLogger;
        this.eventLogger = eventLogger;
        this.eventNames = eventNames;
        this.serverHeader = serverHeader;
    }

    /** Invokes the Ring handler in sync or async mode, per construction. */
    public void run() {
        if (isRingAsync) {
            invokeAsync();
        } else {
            invokeSync();
        }
    }

    // Sync mode: the handler returns the response map directly.
    private void invokeSync() {
        try {
            respond((Map) handler.invoke(buildRequestMap(req)));
        } catch (Throwable e) {
            respondError(e);
        }
    }

    // Async (3-arity Ring) mode: the handler receives respond/raise callbacks.
    private void invokeAsync() {
        try {
            handler.invoke(buildRequestMap(req),
                    new AFn() {
                        public Object invoke(Object resp) {
                            try {
                                respond((Map) resp);
                            } catch (Throwable e) {
                                respondError(e);
                            }
                            return null;
                        }
                    },
                    new AFn() {
                        public Object invoke(Object e) {
                            respondError((Throwable) e);
                            return null;
                        }
                    });
        } catch (Throwable e) {
            respondError(e);
        }
    }

    /** Encodes and sends a Ring response map; null maps become a 404. */
    private void respond(Map resp) throws Throwable {
        if (resp == null) { // handler returned null
            cb.run(HttpEncode(404, new HeaderMap(), null, this.serverHeader));
            eventLogger.log(eventNames.serverStatus404);
            return;
        }
        Object body = resp.get(BODY);
        if (body instanceof AsyncChannel) {
            // Channel was hijacked; the response is sent through it later.
            return;
        }
        HeaderMap headers = HeaderMap.camelCase((Map) resp.get(HEADERS));
        // Make the keep-alive decision explicit when it differs from the
        // HTTP-version default.
        if (req.version == HTTP_1_0 && req.isKeepAlive) {
            headers.put("Connection", "Keep-Alive");
        } else if (req.version == HTTP_1_1 && !req.isKeepAlive) {
            headers.put("Connection", "Close");
        }
        final int status = getStatus(resp);
        cb.run(HttpEncode(status, headers, body, this.serverHeader));
        eventLogger.log(eventNames.serverStatusPrefix + status);
    }

    /** Logs the failure and replies 500 with the exception message. */
    private void respondError(Throwable e) {
        errorLogger.log(req.method + " " + req.uri, e);
        eventLogger.log(eventNames.serverStatus500);
        cb.run(HttpEncode(500, ErrorResponse.headers, e.getMessage(), this.serverHeader));
    }
}
class LinkingRunnable implements Runnable {
private final Runnable impl;
AtomicReference<LinkingRunnable> next = new AtomicReference<LinkingRunnable>(null);
public LinkingRunnable(Runnable r) {
this.impl = r;
}
public void run() {
impl.run();
// Run all jobs in this chain without consuming extra call stack
LinkingRunnable r = this;
while (!r.next.compareAndSet(null, r)) {
r = r.next.get();
r.impl.run();
}
}
}
class WSHandler implements Runnable {

    private final Frame frame;
    private final AsyncChannel channel;
    private final ContextLogger<String, Throwable> errorLogger;
    private final EventLogger<String> eventLogger;
    private final EventNames eventNames;

    protected WSHandler(AsyncChannel channel, Frame frame,
                        ContextLogger<String, Throwable> errorLogger,
                        EventLogger<String> eventLogger, EventNames eventNames) {
        this.channel = channel;
        this.frame = frame;
        this.errorLogger = errorLogger;
        this.eventLogger = eventLogger;
        this.eventNames = eventNames;
    }

    /** Dispatches one websocket frame to the channel's callbacks. */
    @Override
    public void run() {
        try {
            dispatch();
        } catch (Throwable e) {
            errorLogger.log("handle websocket frame " + frame, e);
            eventLogger.log(eventNames.serverWsFrameError);
        }
    }

    // Routes the frame by concrete type; unknown types are logged, not fatal.
    private void dispatch() {
        if (frame instanceof TextFrame) {
            channel.messageReceived(((TextFrame) frame).getText());
        } else if (frame instanceof BinaryFrame) {
            channel.messageReceived(frame.data);
        } else if (frame instanceof PingFrame) {
            channel.pingReceived(frame.data);
        } else if (frame instanceof PongFrame) {
            channel.pongReceived(frame.data);
        } else {
            errorLogger.log("Unknown frame received in websocket handler " + frame, null);
        }
    }
}
/**
 * Bridges http-kit's event loop to Ring handlers: HTTP requests and
 * websocket frames are executed on a worker {@link ExecutorService},
 * with per-channel ordering guaranteed for websocket frames.
 */
public class RingHandler implements IHandler {

    final ExecutorService execs;
    final IFn handler;
    final boolean isRingAsync;
    final ContextLogger<String, Throwable> errorLogger;
    final EventLogger<String> eventLogger;
    final EventNames eventNames;
    final String serverHeader;

    public RingHandler(IFn handler, boolean isRingAsync, ExecutorService execs) {
        this(handler, isRingAsync, execs, ContextLogger.ERROR_PRINTER, EventLogger.NOP, EventNames.DEFAULT, "http-kit");
    }

    public RingHandler(int thread, IFn handler, boolean isRingAsync, String prefix, int queueSize, String serverHeader) {
        this(thread, handler, isRingAsync, prefix, queueSize, serverHeader, ContextLogger.ERROR_PRINTER, EventLogger.NOP, EventNames.DEFAULT);
    }

    public RingHandler(int thread, IFn handler, boolean isRingAsync, String prefix, int queueSize, String serverHeader,
                       ContextLogger<String, Throwable> errorLogger, EventLogger<String> eventLogger, EventNames eventNames) {
        this.errorLogger = errorLogger;
        this.eventLogger = eventLogger;
        this.eventNames = eventNames;
        // Fixed-size pool over a bounded queue: overload surfaces as
        // RejectedExecutionException (reported as 503) rather than
        // unbounded memory growth.
        PrefixThreadFactory factory = new PrefixThreadFactory(prefix);
        BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(queueSize);
        execs = new ThreadPoolExecutor(thread, thread, 0, TimeUnit.MILLISECONDS, queue, factory);
        this.handler = handler;
        this.isRingAsync = isRingAsync;
        this.serverHeader = serverHeader;
    }

    public RingHandler(IFn handler, boolean isRingAsync, ExecutorService execs,
                       ContextLogger<String, Throwable> errorLogger, EventLogger<String> eventLogger, EventNames eventNames,
                       String serverHeader) {
        this.handler = handler;
        this.isRingAsync = isRingAsync;
        this.execs = execs;
        this.errorLogger = errorLogger;
        this.eventLogger = eventLogger;
        this.eventNames = eventNames;
        this.serverHeader = serverHeader;
    }

    /** Submits an HTTP request to the worker pool; replies 503 when saturated. */
    public void handle(HttpRequest req, RespCallback cb) {
        try {
            execs.submit(new HttpHandler(req, cb, handler, isRingAsync, errorLogger, eventLogger, eventNames, this.serverHeader));
        } catch (RejectedExecutionException e) {
            errorLogger.log("failed to submit task to executor service", e);
            eventLogger.log(eventNames.serverStatus503);
            cb.run(HttpEncode(503, ErrorResponse.headers, "Server unavailable, please try again", this.serverHeader));
        }
    }

    /**
     * Shuts the worker pool down.
     *
     * @param timeoutTs grace period in milliseconds for in-flight tasks;
     *                  {@code <= 0} forces an immediate shutdown
     */
    public void close(int timeoutTs) {
        if (timeoutTs > 0) {
            execs.shutdown();
            try {
                if (!execs.awaitTermination(timeoutTs, TimeUnit.MILLISECONDS)) {
                    execs.shutdownNow();
                }
            } catch (InterruptedException ie) {
                execs.shutdownNow();
                Thread.currentThread().interrupt();
            }
        } else {
            execs.shutdownNow();
        }
    }

    /**
     * Submits a websocket frame for processing. Frames from the same channel
     * are chained through {@link LinkingRunnable} so they run in order.
     */
    public void handle(AsyncChannel channel, Frame frame) {
        WSHandler task = new WSHandler(channel, frame, errorLogger, eventLogger, eventNames);
        // messages from the same client are handled orderly
        LinkingRunnable job = new LinkingRunnable(task);
        LinkingRunnable old = channel.serialTask;
        channel.serialTask = job;
        try {
            if (old == null) { // No previous job
                execs.submit(job);
            } else {
                // Try to append to the previous job's chain; if the CAS fails
                // the previous job already finished, so submit directly.
                // Order is preserved either way.
                if (!old.next.compareAndSet(null, job)) {
                    execs.submit(job);
                }
            }
        } catch (RejectedExecutionException e) {
            // TODO notify client if server is overloaded
            errorLogger.log("increase :queue-size if this happens often", e);
            eventLogger.log(eventNames.serverStatus503Todo);
        }
    }

    public void clientClose(final AsyncChannel channel, final int status) {
        clientClose(channel, status, "");
    }

    /**
     * Handles a connection closed by the client. Runs the channel's close
     * handler on the worker pool, falling back to the calling thread when
     * the pool has already been shut down.
     */
    public void clientClose(final AsyncChannel channel, final int status, final String reason) {
        if (!channel.isClosed()) { // server did not close it first
            // has close handler, execute it in another thread
            if (channel.hasCloseHandler()) {
                try {
                    // no need to maintain order
                    execs.submit(new Runnable() {
                        public void run() {
                            try {
                                channel.onClose(status, reason);
                            } catch (Exception e) {
                                errorLogger.log("on close handler", e);
                                eventLogger.log(eventNames.serverChannelCloseError);
                            }
                        }
                    });
                } catch (RejectedExecutionException e) {
                    /*
                    https://github.com/http-kit/http-kit/issues/152
                    https://github.com/http-kit/http-kit/pull/155
                    When stop-server get called, the thread-pool will call shutdown, wait for sometime
                    for work to be finished.
                    For websocket and long polling with closeHandler registered, we exec closeHandler
                    in the current thread. Get this idea from @pyr, by #155
                    */
                    if (execs.isShutdown()) {
                        try {
                            channel.onClose(status, reason); // do it in current thread
                        } catch (Exception e1) {
                            // FIX: log the close handler's own exception (e1), not the
                            // outer RejectedExecutionException (e) that led us here.
                            errorLogger.log("on close handler", e1);
                            eventLogger.log(eventNames.serverChannelCloseError);
                        }
                    } else {
                        errorLogger.log("increase :queue-size if this happens often", e);
                        eventLogger.log(eventNames.serverStatus503Todo);
                    }
                }
            } else {
                // no close handler registered
                // NOTE(review): this sets closedRan to FALSE although the original
                // comment said "mark the connection as closed" — confirm the flag's
                // intended polarity against AsyncChannel before changing it.
                channel.closedRan.set(false);
            }
        }
    }
}
/**
* Copyright (C) Zhang,Yuexiang (xfeep)
*
*/
package nginx.clojure;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
/**
* Mini constants needed by the Nginx-Clojure Basic Platform
* @author Zhang,Yuexiang (xfeep)
*
*/
public class MiniConstants {
/**
* Ring Spec (1.1) Strings : https://github.com/ring-clojure/ring/blob/master/SPEC
*/
public static final String SERVER_PORT = "server-port";
public static final String SERVER_NAME = "server-name";
public static final String REMOTE_ADDR = "remote-addr";
public static final String URI = "uri";
public static final String QUERY_STRING = "query-string";
public static final String SCHEME = "scheme";
public static final String REQUEST_METHOD = "request-method";
public static final String CONTENT_TYPE = "content-type";
public static final String CHARACTER_ENCODING = "character-encoding";
public static final String SSL_CLIENT_CERT = "ssl-client-cert";
public static final String HEADERS = "headers";
public static final String BODY = "body";
/**
* HTTP methods
* */
public static final String UNKNOWN = "UNKNOWN";
public static final String GET = "get";
public static final String HEAD = "head";
public static final String POST = "post";
public static final String PUT = "put";
public static final String DELETE = "delete";
public static final String MKCOL = "mkcol";
public static final String COPY = "copy";
public static final String MOVE = "move";
public static final String OPTIONS = "options";
public static final String PROPFIND = "propfind";
public static final String PROPPATCH = "proppatch";
public static final String LOCK = "lock";
public static final String UNLOCK = "unlock";
public static final String PATCH = "patch";
public static final String TRACE = "trace";
public static final String[] HTTP_METHODS = { UNKNOWN, GET, HEAD,
POST, PUT, DELETE, MKCOL, COPY, MOVE, OPTIONS, PROPFIND,
PROPPATCH, LOCK, UNLOCK, PATCH, TRACE };
public static Map<String, NginxHeaderHolder> KNOWN_REQ_HEADERS = new CaseInsensitiveMap<NginxHeaderHolder>();
public static Map<String, NginxHeaderHolder> KNOWN_RESP_HEADERS = new CaseInsensitiveMap<NginxHeaderHolder>();
public static NginxHeaderHolder RESP_CONTENT_TYPE_HOLDER;
public static Map<String, Long> MIME_TYPES = new HashMap<String, Long>();
public static Map<String, Long> CORE_VARS = new CaseInsensitiveMap<Long>();
public static Map<String, Long> HEADERS_NAMES = new CaseInsensitiveMap<Long>();
public static final String STATUS_STR = "status";
// public static final String BODY = RT.keyword(null, "body");
// public static final String HEADERS = RT.keyword(null, "headers");
public static final String DEFAULT_ENCODING_STR = "utf-8";
public static final Charset DEFAULT_ENCODING = Charset.forName(DEFAULT_ENCODING_STR);
public static final int NGX_CLOJURE_BUF_LAST_OF_NONE = 0;
public static final int NGX_CLOJURE_BUF_LAST_OF_CHAIN = 1;
public static final int NGX_CLOJURE_BUF_LAST_OF_RESPONSE = 2;
public static final int NGX_CHAIN_FILTER_CHUNK_NO_LAST = -1;
public static final int NGX_CHAIN_FILTER_CHUNK_HAS_LAST = -2;
public static final int NGX_CLOJURE_BUF_LAST_FLAG = 0x01;
public static final int NGX_CLOJURE_BUF_FLUSH_FLAG = 0x02;
public static final int NGX_CLOJURE_BUF_IGNORE_FILTER_FLAG = 0x04;
public static final int NGX_CLOJURE_BUF_FILE_FLAG = 0x08;
public static final int NGX_CLOJURE_BUF_MEM_FLAG = 0x10;
/**
* this constant hints whether we send java.lang.String or bytes (byte[], ByteBuffer) from app level
*/
public static final int NGX_CLOJURE_BUF_APP_MSGTXT = 0x08;
/**
* System Event : 0x00 ~ 0x1f
* App Event : 0x20 ~ 0xff
* Simple Event : 0x00 ~ 0x7f, only event id (7Byte), no message body
* Complex Event : 0x80 ~ 0xff
*/
public static final int POST_EVENT_TYPE_SYSTEM_EVENT_IDX_START = 0;
public static final int POST_EVENT_TYPE_HANDLE_RESPONSE = 0;
public static final int POST_EVENT_TYPE_CLOSE_SOCKET = 0x01;
public static final int POST_EVENT_TYPE_HIJACK_SEND = 0x02;
public static final int POST_EVENT_TYPE_HIJACK_SEND_HEADER = 0x03;
public static final int POST_EVENT_TYPE_HIJACK_SEND_RESPONSE = 0x04;
public static final int POST_EVENT_TYPE_HIJACK_WRITE = 0x05;
public static final int POST_EVENT_TYPE_PUB = 0x1e;
public static final int POST_EVENT_TYPE_POLL_TASK = 0x1f;
public static final int POST_EVENT_TYPE_SYSTEM_EVENT_IDX_END = 0x1f;
public static final int POST_EVENT_TYPE_APPICATION_EVENT_IDX_START = 0x20;
public static final int POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START = 0x80;
public static final int POST_EVENT_TYPE_COMPLEX_EVENT_IDX_END = 0xff;
public static int BYTE_ARRAY_OFFSET;
public static long STRING_CHAR_ARRAY_OFFSET;
public static long STRING_OFFSET_OFFSET;
public static final int NGX_HTTP_CLOJURE_GET_HEADER_FLAG_HEADERS_OUT = 1;
public static final int NGX_HTTP_CLOJURE_GET_HEADER_FLAG_MERGE_KEY = 2;
/* These constants won't be final until we consider them really stable.
 * Keeping them non-final prevents the Java compiler from inlining their literal values where they are used. */
public static int NGX_HTTP_CLOJURE_MEM_IDX_START = 0;
/* index for size of ngx_uint_t */
public static int NGX_HTTP_CLOJURE_UINT_SIZE_IDX = 0;
public static long NGX_HTTP_CLOJURE_UINT_SIZE;
public static int NGX_HTTP_CLOJURE_PTR_SIZE_IDX = 1;
public static long NGX_HTTP_CLOJURE_PTR_SIZE;
public static int NGX_HTTP_CLOJURE_SIZET_SIZE_IDX = 2;
public static long NGX_HTTP_CLOJURE_SIZET_SIZE;
public static int NGX_HTTP_CLOJURE_OFFT_SIZE_IDX = 3;
public static long NGX_HTTP_CLOJURE_OFFT_SIZE;
public static int NGX_HTTP_CLOJURE_BUFFER_SIZE_IDX = 4;
public static long NGX_HTTP_CLOJURE_BUFFER_SIZE;
/* index for size of ngx_str_t */
public static int NGX_HTTP_CLOJURE_STR_SIZE_IDX = 8;
public static long NGX_HTTP_CLOJURE_STR_SIZE;
/* field offset index for ngx_str_t */
public static int NGX_HTTP_CLOJURE_STR_LEN_IDX = 9;
public static long NGX_HTTP_CLOJURE_STR_LEN_OFFSET;
public static int NGX_HTTP_CLOJURE_STR_DATA_IDX = 10;
public static long NGX_HTTP_CLOJURE_STR_DATA_OFFSET;
/* index for size of ngx_table_elt_t */
public static int NGX_HTTP_CLOJURE_TELT_SIZE_IDX = 11;
public static long NGX_HTTP_CLOJURE_TELT_SIZE;
/* field offset index for ngx_table_elt_t */
public static int NGX_HTTP_CLOJURE_TEL_HASH_IDX = 12;
public static long NGX_HTTP_CLOJURE_TEL_HASH_OFFSET;
public static int NGX_HTTP_CLOJURE_TEL_KEY_IDX = 13;
public static long NGX_HTTP_CLOJURE_TEL_KEY_OFFSET;
public static int NGX_HTTP_CLOJURE_TEL_VALUE_IDX = 14;
public static long NGX_HTTP_CLOJURE_TEL_VALUE_OFFSET;
public static int NGX_HTTP_CLOJURE_TEL_LOWCASE_KEY_IDX = 15;
public static long NGX_HTTP_CLOJURE_TEL_LOWCASE_KEY_OFFSET;
//#if (nginx_version >= 1023000)
public static int NGX_HTTP_CLOJURE_TEL_NEXT_IDX = 96;
public static long NGX_HTTP_CLOJURE_TEL_NEXT_OFFSET;
//#endif
public static int NGX_HTTP_CLOJURE_CHAINT_SIZE_IDX = 16;
public static long NGX_HTTP_CLOJURE_CHAINT_SIZE;
public static int NGX_HTTP_CLOJURE_CHAIN_BUF_IDX = 17;
public static long NGX_HTTP_CLOJURE_CHAIN_BUF_OFFSET;
public static int NGX_HTTP_CLOJURE_CHAIN_NEXT_IDX = 18;
public static long NGX_HTTP_CLOJURE_CHAIN_NEXT_OFFSET;
public static int NGX_HTTP_CLOJURE_VARIABLET_SIZE_IDX = 19;
public static long NGX_HTTP_CLOJURE_VARIABLET_SIZE;
public static int NGX_HTTP_CLOJURE_CORE_VARIABLES_ADDR_IDX = 20;
public static long NGX_HTTP_CLOJURE_CORE_VARIABLES_ADDR;
public static int NGX_HTTP_CLOJURE_HEADERS_NAMES_ADDR_IDX = 21;
public static long NGX_HTTP_CLOJURE_HEADERS_NAMES_ADDR;
public static int NGX_HTTP_CLOJURE_ARRAYT_SIZE_IDX = 22;
public static long NGX_HTTP_CLOJURE_ARRAYT_SIZE;
public static int NGX_HTTP_CLOJURE_ARRAY_ELTS_IDX = 23;
public static long NGX_HTTP_CLOJURE_ARRAY_ELTS_OFFSET;
public static int NGX_HTTP_CLOJURE_ARRAY_NELTS_IDX = 24;
public static long NGX_HTTP_CLOJURE_ARRAY_NELTS_OFFSET;
public static int NGX_HTTP_CLOJURE_ARRAY_SIZE_IDX = 25;
public static long NGX_HTTP_CLOJURE_ARRAY_SIZE_OFFSET;
public static int NGX_HTTP_CLOJURE_ARRAY_NALLOC_IDX = 26;
public static long NGX_HTTP_CLOJURE_ARRAY_NALLOC_OFFSET;
public static int NGX_HTTP_CLOJURE_ARRAY_POOL_IDX = 27;
public static long NGX_HTTP_CLOJURE_ARRAY_POOL_OFFSET;
public static int NGX_HTTP_CLOJURE_KEYVALT_SIZE_IDX = 28;
public static long NGX_HTTP_CLOJURE_KEYVALT_SIZE;
public static int NGX_HTTP_CLOJURE_KEYVALT_KEY_IDX = 29;
public static long NGX_HTTP_CLOJURE_KEYVALT_KEY_OFFSET;
public static int NGX_HTTP_CLOJURE_KEYVALT_VALUE_IDX = 30;
public static long NGX_HTTP_CLOJURE_KEYVALT_VALUE_OFFSET;
/* index for size of ngx_http_request_t */
public static int NGX_HTTP_CLOJURE_REQT_SIZE_IDX = 32;
public static long NGX_HTTP_CLOJURE_REQT_SIZE;
/* field offset index for ngx_http_request_t */
public static int NGX_HTTP_CLOJURE_REQ_METHOD_IDX = 33;
public static long NGX_HTTP_CLOJURE_REQ_METHOD_OFFSET;
public static int NGX_HTTP_CLOJURE_REQ_URI_IDX = 34;
public static long NGX_HTTP_CLOJURE_REQ_URI_OFFSET;
public static int NGX_HTTP_CLOJURE_REQ_ARGS_IDX = 35;
public static long NGX_HTTP_CLOJURE_REQ_ARGS_OFFSET;
public static int NGX_HTTP_CLOJURE_REQ_HEADERS_IN_IDX = 36;
public static long NGX_HTTP_CLOJURE_REQ_HEADERS_IN_OFFSET;
public static int NGX_HTTP_CLOJURE_REQ_POOL_IDX = 37;
public static long NGX_HTTP_CLOJURE_REQ_POOL_OFFSET;
public static int NGX_HTTP_CLOJURE_REQ_HEADERS_OUT_IDX = 38;
public static long NGX_HTTP_CLOJURE_REQ_HEADERS_OUT_OFFSET;
public static int NGX_HTTP_CLOJURE_MIME_TYPES_ADDR_IDX = 63;
public static long NGX_HTTP_CLOJURE_MIME_TYPES_ADDR;
/*index for size of ngx_http_headers_in_t */
public static int NGX_HTTP_CLOJURE_HEADERSIT_SIZE_IDX = 64;
public static long NGX_HTTP_CLOJURE_HEADERSIT_SIZE;
/*field offset index for ngx_http_headers_in_t*/
public static int NGX_HTTP_CLOJURE_HEADERSI_HOST_IDX = 65;
public static long NGX_HTTP_CLOJURE_HEADERSI_HOST_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_CONNECTION_IDX = 66;
public static long NGX_HTTP_CLOJURE_HEADERSI_CONNECTION_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_IF_MODIFIED_SINCE_IDX = 67;
public static long NGX_HTTP_CLOJURE_HEADERSI_IF_MODIFIED_SINCE_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_IF_UNMODIFIED_SINCE_IDX = 68;
public static long NGX_HTTP_CLOJURE_HEADERSI_IF_UNMODIFIED_SINCE_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_USER_AGENT_IDX = 69;
public static long NGX_HTTP_CLOJURE_HEADERSI_USER_AGENT_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_REFERER_IDX = 70;
public static long NGX_HTTP_CLOJURE_HEADERSI_REFERER_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_IDX = 71;
public static long NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_CONTENT_TYPE_IDX = 72;
public static long NGX_HTTP_CLOJURE_HEADERSI_CONTENT_TYPE_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_RANGE_IDX = 73;
public static long NGX_HTTP_CLOJURE_HEADERSI_RANGE_OFFSET ;
public static int NGX_HTTP_CLOJURE_HEADERSI_IF_RANGE_IDX = 74;
public static long NGX_HTTP_CLOJURE_HEADERSI_IF_RANGE_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_TRANSFER_ENCODING_IDX = 75;
public static long NGX_HTTP_CLOJURE_HEADERSI_TRANSFER_ENCODING_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_EXPECT_IDX = 76;
public static long NGX_HTTP_CLOJURE_HEADERSI_EXPECT_OFFSET;
//#if (NGX_HTTP_GZIP)
public static int NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_ENCODING_IDX = 77;
public static long NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_ENCODING_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_VIA_IDX = 78;
public static long NGX_HTTP_CLOJURE_HEADERSI_VIA_OFFSET;
//#endif
public static int NGX_HTTP_CLOJURE_HEADERSI_AUTHORIZATION_IDX = 79;
public static long NGX_HTTP_CLOJURE_HEADERSI_AUTHORIZATION_OFFSET;
public static int NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_IDX = 80;
public static long NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_OFFSET ;
	/*
	 * Memory-index slots for well-known request headers (ngx_http_headers_in_t).
	 * Each *_IDX constant is a fixed slot number in the index table shared with
	 * the native nginx-clojure module; the paired *_OFFSET holds the byte offset
	 * of the field inside the C struct and is filled in at runtime from native
	 * code (presumably via MemoryUtil.initMemIndex -- confirm against that
	 * initializer; it is 0 until initialized).
	 * The //#if ... //#endif comment markers mirror the conditional compilation
	 * of the corresponding nginx C modules: a slot is only meaningful when that
	 * module was compiled into nginx.
	 */
	//#if (NGX_HTTP_PROXY || NGX_HTTP_REALIP || NGX_HTTP_GEO)
	public static int NGX_HTTP_CLOJURE_HEADERSI_X_FORWARDED_FOR_IDX = 81;
	public static long NGX_HTTP_CLOJURE_HEADERSI_X_FORWARDED_FOR_OFFSET ;
	//#endif
	//#if (NGX_HTTP_REALIP)
	public static int NGX_HTTP_CLOJURE_HEADERSI_X_REAL_IP_IDX = 82;
	public static long NGX_HTTP_CLOJURE_HEADERSI_X_REAL_IP_OFFSET;
	//#endif
	//#if (NGX_HTTP_HEADERS)
	public static int NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_IDX = 83;
	public static long NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_LANGUAGE_IDX = 84;
	public static long NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_LANGUAGE_OFFSET;
	//#endif
	//#if (NGX_HTTP_DAV)
	public static int NGX_HTTP_CLOJURE_HEADERSI_DEPTH_IDX = 85;
	public static long NGX_HTTP_CLOJURE_HEADERSI_DEPTH_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSI_DESTINATION_IDX = 86;
	public static long NGX_HTTP_CLOJURE_HEADERSI_DESTINATION_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSI_OVERWRITE_IDX = 87;
	public static long NGX_HTTP_CLOJURE_HEADERSI_OVERWRITE_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSI_DATE_IDX = 88;
	public static long NGX_HTTP_CLOJURE_HEADERSI_DATE_OFFSET ;
	//#endif
	// Slots below are unconditional (always present in ngx_http_headers_in_t).
	public static int NGX_HTTP_CLOJURE_HEADERSI_USER_IDX = 89;
	public static long NGX_HTTP_CLOJURE_HEADERSI_USER_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSI_PASSWD_IDX = 90;
	public static long NGX_HTTP_CLOJURE_HEADERSI_PASSWD_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSI_COOKIE_IDX = 91;
	public static long NGX_HTTP_CLOJURE_HEADERSI_COOKIE_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSI_SERVER_IDX = 92;
	public static long NGX_HTTP_CLOJURE_HEADERSI_SERVER_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_N_IDX = 93;
	public static long NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_N_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_N_IDX = 94;
	public static long NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_N_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSI_HEADERS_IDX = 95;
	public static long NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET ;
	/*
	 * Memory-index slots for response headers (ngx_http_headers_out_t).
	 * As with the request-header slots above, *_IDX values are fixed table
	 * indexes and *_OFFSET values are struct offsets filled in at runtime by
	 * native code (presumably MemoryUtil.initMemIndex -- TODO confirm).
	 */
	/*index for size of ngx_http_headers_out_t */
	public static int NGX_HTTP_CLOJURE_HEADERSOT_SIZE_IDX = 128;
	public static long NGX_HTTP_CLOJURE_HEADERSOT_SIZE;
	/*field offset index for ngx_http_headers_out_t*/
	public static int NGX_HTTP_CLOJURE_HEADERSO_STATUS_IDX = 129;
	public static long NGX_HTTP_CLOJURE_HEADERSO_STATUS_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_STATUS_LINE_IDX = 130;
	public static long NGX_HTTP_CLOJURE_HEADERSO_STATUS_LINE_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_SERVER_IDX = 131;
	public static long NGX_HTTP_CLOJURE_HEADERSO_SERVER_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_DATE_IDX = 132;
	public static long NGX_HTTP_CLOJURE_HEADERSO_DATE_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_IDX = 133;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CONTENT_ENCODING_IDX = 134;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CONTENT_ENCODING_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_LOCATION_IDX = 135;
	public static long NGX_HTTP_CLOJURE_HEADERSO_LOCATION_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_REFRESH_IDX = 136;
	public static long NGX_HTTP_CLOJURE_HEADERSO_REFRESH_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_IDX = 137;
	public static long NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CONTENT_RANGE_IDX = 138;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CONTENT_RANGE_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_ACCEPT_RANGES_IDX = 139;
	public static long NGX_HTTP_CLOJURE_HEADERSO_ACCEPT_RANGES_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_WWW_AUTHENTICATE_IDX = 140;
	public static long NGX_HTTP_CLOJURE_HEADERSO_WWW_AUTHENTICATE_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_EXPIRES_IDX = 141;
	public static long NGX_HTTP_CLOJURE_HEADERSO_EXPIRES_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_ETAG_IDX = 142;
	public static long NGX_HTTP_CLOJURE_HEADERSO_ETAG_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_OVERRIDE_CHARSET_IDX = 143;
	public static long NGX_HTTP_CLOJURE_HEADERSO_OVERRIDE_CHARSET_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_LEN_IDX = 144;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_LEN_OFFSET ;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_IDX = 145;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CHARSET_IDX = 146;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CHARSET_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_LOWCASE_IDX = 147;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_LOWCASE_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_HASH_IDX = 148;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_HASH_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CACHE_CONTROL_IDX = 149;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CACHE_CONTROL_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_N_IDX = 150;
	public static long NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_N_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_DATE_TIME_IDX = 151;
	public static long NGX_HTTP_CLOJURE_HEADERSO_DATE_TIME_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_TIME_IDX = 152;
	public static long NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_TIME_OFFSET;
	public static int NGX_HTTP_CLOJURE_HEADERSO_HEADERS_IDX = 153;
	public static long NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET;
	/*
	 * Miscellaneous runtime values. The *_ID constants are slot numbers in the
	 * shared memory-index table; the paired values without _ID are filled in at
	 * runtime by native code (presumably MemoryUtil.initMemIndex -- TODO confirm).
	 */
	public static int NGX_WORKER_PROCESSORS_NUM_ID = 250;
	public static long NGX_WORKER_PROCESSORS_NUM;
	// public static int NGINX_CLOJURE_MODULE_CTX_PHRASE_ID = 251;
	// public static long NGINX_CLOJURE_MODULE_CTX_PHRASE_ID_OFFSET;
	public static int NGINX_CLOJURE_RT_WORKERS_ID = 252;
	public static long NGINX_CLOJURE_RT_WORKERS;
	public static int NGINX_VER_ID = 253;
	public static long NGINX_VER;
	public static int NGINX_CLOJURE_VER_ID = 254;
	public static long NGINX_CLOJURE_VER;
	public static String NGINX_CLOJURE_FULL_VER;
	//these two will be updated to NGX_HTTP_CLOJURE_BUFFER_SIZE
	public static int NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE = 1024 * 8;
	public static int NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_LINE_SIZE = NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE/2;
	public static int NGX_HTTP_CLOJURE_MEM_IDX_END = 255;
	//the lowest version of the nginx-clojure C module that this Java runtime requires
	public static long NGINX_CLOJURE_RT_REQUIRED_LVER = 5002;
	public static long NGINX_CLOJURE_RT_VER = 6001;
	// Return codes mirrored from nginx's ngx_core.h
	public final static int NGX_OK = 0;
	public final static int NGX_ERROR = -1;
	public final static int NGX_AGAIN = -2;
	public final static int NGX_BUSY = -3;
	public final static int NGX_DONE = -4;
	public final static int NGX_DECLINED = -5;
	public final static int NGX_ABORT = -6;
	// Request-processing phases mirrored from nginx's ngx_http_phases enum
	public final static int NGX_HTTP_POST_READ_PHASE = 0;
	public final static int NGX_HTTP_SERVER_REWRITE_PHASE = 1;
	public final static int NGX_HTTP_FIND_CONFIG_PHASE =2;
	public final static int NGX_HTTP_REWRITE_PHASE = 3;
	public final static int NGX_HTTP_POST_REWRITE_PHASE = 4;
	public final static int NGX_HTTP_PREACCESS_PHASE = 5;
	public final static int NGX_HTTP_ACCESS_PHASE = 6;
	public final static int NGX_HTTP_POST_ACCESS_PHASE = 7;
	public final static int NGX_HTTP_TRY_FILES_PHASE = 8;
	public final static int NGX_HTTP_CONTENT_PHASE = 9;
	public final static int NGX_HTTP_LOG_PHASE = 10;
	//fake phase (nginx-clojure specific, not an nginx phase) for the load balance handler
	public final static int NGX_HTTP_LOAD_BALANCE_PHASE = 16;
	//fake phases (nginx-clojure specific) for filters and process lifecycle hooks
	public final static int NGX_HTTP_INIT_PROCESS_PHASE = 17;
	public final static int NGX_HTTP_HEADER_FILTER_PHASE = 18;
	public final static int NGX_HTTP_BODY_FILTER_PHASE = 19;
	public final static int NGX_HTTP_EXIT_PROCESS_PHASE = 20;
	/*fake chain values for the header filter*/
	public final static int NGX_HTTP_HEADER_FILTER = -1;
	public final static int NGX_HTTP_HEADER_FILTER_IN_THREADPOOL = -2;
	// HTTP method bitmasks mirrored from nginx's ngx_http_request.h
	public static int NGX_HTTP_GET = 0x0002;
	public static int NGX_HTTP_HEAD = 0x0004;
	public static int NGX_HTTP_POST = 0x0008;
	public static int NGX_HTTP_PUT = 0x0010;
	public static int NGX_HTTP_DELETE = 0x0020;
	public static int NGX_HTTP_MKCOL = 0x0040;
	public static int NGX_HTTP_COPY = 0x0080;
	public static int NGX_HTTP_MOVE = 0x0100;
	public static int NGX_HTTP_OPTIONS = 0x0200;
	public static int NGX_HTTP_PROPFIND = 0x0400;
	public static int NGX_HTTP_PROPPATCH = 0x0800;
	public static int NGX_HTTP_LOCK = 0x1000;
	public static int NGX_HTTP_UNLOCK = 0x2000;
	public static int NGX_HTTP_PATCH = 0x4000;
	public static int NGX_HTTP_TRACE = 0x8000;
	// HTTP status codes mirrored from nginx's ngx_http_request.h
	public static int NGX_HTTP_CONTINUE = 100;
	public static int NGX_HTTP_SWITCHING_PROTOCOLS = 101;
	public static int NGX_HTTP_PROCESSING = 102;
	public static int NGX_HTTP_OK = 200;
	public static int NGX_HTTP_CREATED = 201;
	public static int NGX_HTTP_ACCEPTED = 202;
	public static int NGX_HTTP_NO_CONTENT = 204;
	public static int NGX_HTTP_PARTIAL_CONTENT = 206;
	public static int NGX_HTTP_SPECIAL_RESPONSE = 300;
	public static int NGX_HTTP_MOVED_PERMANENTLY = 301;
	public static int NGX_HTTP_MOVED_TEMPORARILY = 302;
	public static int NGX_HTTP_SEE_OTHER = 303;
	public static int NGX_HTTP_NOT_MODIFIED = 304;
	public static int NGX_HTTP_TEMPORARY_REDIRECT = 307;
	public static int NGX_HTTP_BAD_REQUEST = 400;
	public static int NGX_HTTP_UNAUTHORIZED = 401;
	public static int NGX_HTTP_FORBIDDEN = 403;
	public static int NGX_HTTP_NOT_FOUND = 404;
	public static int NGX_HTTP_NOT_ALLOWED = 405;
	public static int NGX_HTTP_REQUEST_TIME_OUT = 408;
	public static int NGX_HTTP_CONFLICT = 409;
	public static int NGX_HTTP_LENGTH_REQUIRED = 411;
	public static int NGX_HTTP_PRECONDITION_FAILED = 412;
	public static int NGX_HTTP_REQUEST_ENTITY_TOO_LARGE = 413;
	public static int NGX_HTTP_REQUEST_URI_TOO_LARGE = 414;
	public static int NGX_HTTP_UNSUPPORTED_MEDIA_TYPE = 415;
	public static int NGX_HTTP_RANGE_NOT_SATISFIABLE = 416;
	/* Nginx own HTTP codes */
	/* The special code to close connection without any response */
	public static int NGX_HTTP_CLOSE = 444;
	public static int NGX_HTTP_NGINX_CODES = 494;
	public static int NGX_HTTP_REQUEST_HEADER_TOO_LARGE = 494;
	public static int NGX_HTTPS_CERT_ERROR = 495;
	public static int NGX_HTTPS_NO_CERT = 496;
	/*
	 * We use the special code for the plain HTTP requests that are sent to
	 * HTTPS port to distinguish it from 4XX in an error page redirection
	 */
	public static int NGX_HTTP_TO_HTTPS = 497;
	/* 498 is the canceled code for the requests with invalid host name */
	/*
	 * HTTP does not define the code for the case when a client closed the
	 * connection while we are processing its request so we introduce own code
	 * to log such situation when a client has closed the connection before we
	 * even try to send the HTTP header to it
	 */
	public static int NGX_HTTP_CLIENT_CLOSED_REQUEST = 499;
	public static int NGX_HTTP_INTERNAL_SERVER_ERROR = 500;
	public static int NGX_HTTP_NOT_IMPLEMENTED = 501;
	public static int NGX_HTTP_BAD_GATEWAY = 502;
	public static int NGX_HTTP_SERVICE_UNAVAILABLE = 503;
	public static int NGX_HTTP_GATEWAY_TIME_OUT = 504;
	public static int NGX_HTTP_INSUFFICIENT_STORAGE = 507;
	// Channel event bit flags; values 1..4 select the event type, higher bits qualify messages
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_CLOSE = 0;
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_CONNECT = 1;
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_READ = 2;
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_WRITE = 4;
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGREMAIN = 8;
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGTEXT = 16;
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGBIN = 32;
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGCLOSE = 64;
	public static final int NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGFIRST = 128;
	// Event handler capability bit flags
	public static final int NGX_HTTP_CLOJURE_EVENT_HANDLER_FLAG_READ = 1;
	public static final int NGX_HTTP_CLOJURE_EVENT_HANDLER_FLAG_WRITE = 2;
	public static final int NGX_HTTP_CLOJURE_EVENT_HANDLER_FLAG_NOKEEPALIVE = 4;
	// public static final String HEADERS = RT.keyword(null, "headers");
	// public static final String BODY = RT.keyword(null, "body");
	//these consts are initialized by MemoryUtil.initMemIndex
	public static RequestVarFetcher SERVER_PORT_FETCHER;// = new RequestKnownNameVarFetcher("server_port")
	public static RequestVarFetcher SERVER_NAME_FETCHER;// = new RequestKnownNameVarFetcher("server_name")
	public static RequestVarFetcher REMOTE_ADDR_FETCHER;// = new RequestKnownNameVarFetcher("remote_addr")
	public static RequestVarFetcher URI_FETCHER;// = new RequestKnownOffsetVarFetcher(NGX_HTTP_CLOJURE_REQ_URI_OFFSET)
	public static RequestVarFetcher QUERY_STRING_FETCHER;// = new RequestKnownOffsetVarFetcher(NGX_HTTP_CLOJURE_REQ_ARGS_OFFSET)
	public static RequestVarFetcher SCHEME_FETCHER;//= new RequestKnownNameVarFetcher("scheme")
	public static RequestVarFetcher REQUEST_METHOD_FETCHER;// = new RequestMethodFetcher()
	public static RequestVarFetcher CONTENT_TYPE_FETCHER;// = new RequestKnownHeaderFetcher("content-type")
	public static RequestVarFetcher CHARACTER_ENCODING_FETCHER;// = new RequestCharacterEncodingFetcher()
	// public static RequestVarFetcher HEADER_FETCHER;// = new RequestHeaderFetcher()
	public static RequestVarFetcher BODY_FETCHER;// = new RequestBodyFetcher()
	// Worker dispatch modes
	public static final int MODE_DEFAULT = 0;
	public static final int MODE_THREAD = 1;
	public static final int MODE_COROUTINE = 2;
	// NOTE(review): "FORECE" looks like a typo for "FORCE", but both the constant
	// name and its string value are public API used by callers/configuration, so
	// fixing the spelling here would be a breaking change.
	public static final String REQUEST_FORECE_PREFETCH_ALL_PROPERTIES = "fore-prefetch-all-properties";
}
| nginx-clojure/nginx-clojure | src/java/nginx/clojure/MiniConstants.java |
1,468 | /*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.index.shard;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.BufferedChecksumIndexInput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.ThreadInterruptedException;
import org.opensearch.ExceptionsHelper;
import org.opensearch.OpenSearchException;
import org.opensearch.action.ActionRunnable;
import org.opensearch.action.admin.indices.flush.FlushRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.opensearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.opensearch.action.support.replication.PendingReplicationActions;
import org.opensearch.action.support.replication.ReplicationResponse;
import org.opensearch.cluster.metadata.DataStream;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.metadata.MappingMetadata;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.cluster.routing.IndexShardRoutingTable;
import org.opensearch.cluster.routing.RecoverySource;
import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.common.Booleans;
import org.opensearch.common.CheckedConsumer;
import org.opensearch.common.CheckedFunction;
import org.opensearch.common.CheckedRunnable;
import org.opensearch.common.Nullable;
import org.opensearch.common.SetOnce;
import org.opensearch.common.annotation.PublicApi;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.concurrent.GatedCloseable;
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.lease.Releasable;
import org.opensearch.common.lease.Releasables;
import org.opensearch.common.lucene.Lucene;
import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
import org.opensearch.common.metrics.CounterMetric;
import org.opensearch.common.metrics.MeanMetric;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.concurrent.AbstractRunnable;
import org.opensearch.common.util.concurrent.AsyncIOProcessor;
import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor;
import org.opensearch.common.util.concurrent.RunOnce;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.common.util.io.IOUtils;
import org.opensearch.core.Assertions;
import org.opensearch.core.action.ActionListener;
import org.opensearch.core.common.unit.ByteSizeValue;
import org.opensearch.core.index.Index;
import org.opensearch.core.index.shard.ShardId;
import org.opensearch.core.indices.breaker.CircuitBreakerService;
import org.opensearch.core.rest.RestStatus;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.gateway.WriteStateException;
import org.opensearch.index.IndexModule;
import org.opensearch.index.IndexNotFoundException;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.ReplicationStats;
import org.opensearch.index.SegmentReplicationShardStats;
import org.opensearch.index.VersionType;
import org.opensearch.index.cache.IndexCache;
import org.opensearch.index.cache.bitset.ShardBitsetFilterCache;
import org.opensearch.index.cache.request.ShardRequestCache;
import org.opensearch.index.codec.CodecService;
import org.opensearch.index.engine.CommitStats;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.engine.Engine.GetResult;
import org.opensearch.index.engine.EngineConfig;
import org.opensearch.index.engine.EngineConfigFactory;
import org.opensearch.index.engine.EngineException;
import org.opensearch.index.engine.EngineFactory;
import org.opensearch.index.engine.NRTReplicationEngine;
import org.opensearch.index.engine.ReadOnlyEngine;
import org.opensearch.index.engine.RefreshFailedEngineException;
import org.opensearch.index.engine.SafeCommitInfo;
import org.opensearch.index.engine.Segment;
import org.opensearch.index.engine.SegmentsStats;
import org.opensearch.index.fielddata.FieldDataStats;
import org.opensearch.index.fielddata.ShardFieldData;
import org.opensearch.index.flush.FlushStats;
import org.opensearch.index.get.GetStats;
import org.opensearch.index.get.ShardGetService;
import org.opensearch.index.mapper.DocumentMapper;
import org.opensearch.index.mapper.DocumentMapperForType;
import org.opensearch.index.mapper.IdFieldMapper;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.mapper.Mapping;
import org.opensearch.index.mapper.ParsedDocument;
import org.opensearch.index.mapper.RootObjectMapper;
import org.opensearch.index.mapper.SourceToParse;
import org.opensearch.index.mapper.Uid;
import org.opensearch.index.merge.MergeStats;
import org.opensearch.index.recovery.RecoveryStats;
import org.opensearch.index.refresh.RefreshStats;
import org.opensearch.index.remote.RemoteSegmentStats;
import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory;
import org.opensearch.index.search.stats.SearchStats;
import org.opensearch.index.search.stats.ShardSearchStats;
import org.opensearch.index.seqno.ReplicationTracker;
import org.opensearch.index.seqno.RetentionLease;
import org.opensearch.index.seqno.RetentionLeaseStats;
import org.opensearch.index.seqno.RetentionLeaseSyncer;
import org.opensearch.index.seqno.RetentionLeases;
import org.opensearch.index.seqno.SeqNoStats;
import org.opensearch.index.seqno.SequenceNumbers;
import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask;
import org.opensearch.index.similarity.SimilarityService;
import org.opensearch.index.store.RemoteSegmentStoreDirectory;
import org.opensearch.index.store.RemoteStoreFileDownloader;
import org.opensearch.index.store.Store;
import org.opensearch.index.store.Store.MetadataSnapshot;
import org.opensearch.index.store.StoreFileMetadata;
import org.opensearch.index.store.StoreStats;
import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata;
import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory;
import org.opensearch.index.translog.RemoteFsTranslog;
import org.opensearch.index.translog.RemoteTranslogStats;
import org.opensearch.index.translog.Translog;
import org.opensearch.index.translog.TranslogConfig;
import org.opensearch.index.translog.TranslogFactory;
import org.opensearch.index.translog.TranslogRecoveryRunner;
import org.opensearch.index.translog.TranslogStats;
import org.opensearch.index.warmer.ShardIndexWarmerService;
import org.opensearch.index.warmer.WarmerStats;
import org.opensearch.indices.IndexingMemoryController;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.RemoteStoreSettings;
import org.opensearch.indices.cluster.IndicesClusterStateService;
import org.opensearch.indices.recovery.PeerRecoveryTargetService;
import org.opensearch.indices.recovery.RecoveryFailedException;
import org.opensearch.indices.recovery.RecoveryListener;
import org.opensearch.indices.recovery.RecoverySettings;
import org.opensearch.indices.recovery.RecoveryState;
import org.opensearch.indices.recovery.RecoveryTarget;
import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
import org.opensearch.indices.replication.common.ReplicationTimer;
import org.opensearch.repositories.RepositoriesService;
import org.opensearch.repositories.Repository;
import org.opensearch.search.suggest.completion.CompletionStats;
import org.opensearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.LongSupplier;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import static org.opensearch.index.seqno.RetentionLeaseActions.RETAIN_ALL;
import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY;
import static org.opensearch.index.seqno.SequenceNumbers.MAX_SEQ_NO;
import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_MIGRATING_SEEDED;
import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_MIGRATING_UNSEEDED;
import static org.opensearch.index.shard.IndexShard.ShardMigrationState.REMOTE_NON_MIGRATING;
import static org.opensearch.index.translog.Translog.Durability;
import static org.opensearch.index.translog.Translog.TRANSLOG_UUID_KEY;
/**
* An OpenSearch index shard
*
* @opensearch.api
*/
@PublicApi(since = "1.0.0")
public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
    // --- Core collaborators, injected via the constructor ---
    private final ThreadPool threadPool;
    private final MapperService mapperService;
    private final IndexCache indexCache;
    private final Store store;
    // --- Per-shard statistics and services ---
    private final InternalIndexingStats internalIndexingStats;
    private final ShardSearchStats searchStats = new ShardSearchStats();
    private final ShardGetService getService;
    private final ShardIndexWarmerService shardWarmerService;
    private final ShardRequestCache requestCacheStats;
    private final ShardFieldData shardFieldData;
    private final ShardBitsetFilterCache shardBitsetFilterCache;
    private final Object mutex = new Object();
    private final String checkIndexOnStartup;
    private final CodecService codecService;
    private final Engine.Warmer warmer;
    private final SimilarityService similarityService;
    private final TranslogConfig translogConfig;
    private final IndexEventListener indexEventListener;
    private final QueryCachingPolicy cachingPolicy;
    private final Supplier<Sort> indexSortSupplier;
    // Package visible for testing
    final CircuitBreakerService circuitBreakerService;
    private final SearchOperationListener searchOperationListener;
    // --- Replication / sequence-number bookkeeping ---
    private final GlobalCheckpointListeners globalCheckpointListeners;
    private final PendingReplicationActions pendingReplicationActions;
    private final ReplicationTracker replicationTracker;
    private final SegmentReplicationCheckpointPublisher checkpointPublisher;
    protected volatile ShardRouting shardRouting;
    protected volatile IndexShardState state;
    // ensure happens-before relation between addRefreshListener() and postRecovery()
    private final Object postRecoveryMutex = new Object();
    private volatile long pendingPrimaryTerm; // see JavaDocs for getPendingPrimaryTerm
    private final Object engineMutex = new Object(); // lock ordering: engineMutex -> mutex
    private final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
    final EngineFactory engineFactory;
    final EngineConfigFactory engineConfigFactory;
    private final IndexingOperationListener indexingOperationListeners;
    private final Runnable globalCheckpointSyncer;
    // Package-private accessor for the global checkpoint sync task (used by tests/siblings).
    Runnable getGlobalCheckpointSyncer() {
        return globalCheckpointSyncer;
    }
    private final RetentionLeaseSyncer retentionLeaseSyncer;
    @Nullable
    private volatile RecoveryState recoveryState;
    private final RecoveryStats recoveryStats = new RecoveryStats();
    private final MeanMetric refreshMetric = new MeanMetric();
    private final MeanMetric externalRefreshMetric = new MeanMetric();
    private final MeanMetric flushMetric = new MeanMetric();
    private final CounterMetric periodicFlushMetric = new CounterMetric();
    private final ShardEventListener shardEventListener = new ShardEventListener();
    private final ShardPath path;
    private final IndexShardOperationPermits indexShardOperationPermits;
    private static final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.POST_RECOVERY);
    // for primaries, we only allow to write when actually started (so the cluster has decided we started)
    // in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be
    // in state RECOVERING or POST_RECOVERY.
    // for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on
    // version checks to make sure its consistent a relocated shard can also be target of a replication if the relocation target has not
    // been marked as active yet and is syncing it's changes back to the relocation source
    private static final EnumSet<IndexShardState> writeAllowedStates = EnumSet.of(
        IndexShardState.RECOVERING,
        IndexShardState.POST_RECOVERY,
        IndexShardState.STARTED
    );
    private final CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper;
    /**
     * True if this shard is still indexing (recently) and false if we've been idle for long enough (as periodically checked by {@link
     * IndexingMemoryController}).
     */
    private final AtomicBoolean active = new AtomicBoolean();
    /**
     * Allows for the registration of listeners that are called when a change becomes visible for search.
     */
    private final RefreshListeners refreshListeners;
    private final AtomicLong lastSearcherAccess = new AtomicLong();
    private final AtomicReference<Translog.Location> pendingRefreshLocation = new AtomicReference<>();
    private final RefreshPendingLocationListener refreshPendingLocationListener;
    private volatile boolean useRetentionLeasesInPeerRecovery;
    // --- Remote-store / migration support ---
    private final Store remoteStore;
    private final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier;
    private final boolean isTimeSeriesIndex;
    private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory;
    private final List<ReferenceManager.RefreshListener> internalRefreshListener = new ArrayList<>();
    private final RemoteStoreFileDownloader fileDownloader;
    private final RecoverySettings recoverySettings;
    private final RemoteStoreSettings remoteStoreSettings;
    /*
    On source doc rep node, It will be DOCREP_NON_MIGRATING.
    On source remote node , it will be REMOTE_MIGRATING_SEEDED when relocating from remote node
    On source remote node , it will be REMOTE_MIGRATING_UNSEEDED when relocating from docrep node
    */
    private final ShardMigrationState shardMigrationState;
    private DiscoveryNodes discoveryNodes;
    /**
     * Creates a new shard bound to the given routing entry and on-disk {@link ShardPath}.
     * <p>
     * This constructor only wires collaborators and per-shard bookkeeping (stats and
     * listener composites, the {@link ReplicationTracker}, query caching policy,
     * operation permits) and persists the shard metadata to disk via
     * {@code persistMetadata}; it does not open an engine. The shard starts in
     * {@link IndexShardState#CREATED}.
     *
     * @throws IOException if persisting the shard metadata fails
     */
    public IndexShard(
        final ShardRouting shardRouting,
        final IndexSettings indexSettings,
        final ShardPath path,
        final Store store,
        final Supplier<Sort> indexSortSupplier,
        final IndexCache indexCache,
        final MapperService mapperService,
        final SimilarityService similarityService,
        final EngineFactory engineFactory,
        final EngineConfigFactory engineConfigFactory,
        final IndexEventListener indexEventListener,
        final CheckedFunction<DirectoryReader, DirectoryReader, IOException> indexReaderWrapper,
        final ThreadPool threadPool,
        final BigArrays bigArrays,
        final Engine.Warmer warmer,
        final List<SearchOperationListener> searchOperationListener,
        final List<IndexingOperationListener> listeners,
        final Runnable globalCheckpointSyncer,
        final RetentionLeaseSyncer retentionLeaseSyncer,
        final CircuitBreakerService circuitBreakerService,
        final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier,
        @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher,
        @Nullable final Store remoteStore,
        final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory,
        final String nodeId,
        final RecoverySettings recoverySettings,
        final RemoteStoreSettings remoteStoreSettings,
        boolean seedRemote,
        final DiscoveryNodes discoveryNodes
    ) throws IOException {
        super(shardRouting.shardId(), indexSettings);
        assert shardRouting.initializing();
        this.shardRouting = shardRouting;
        final Settings settings = indexSettings.getSettings();
        this.codecService = new CodecService(mapperService, indexSettings, logger);
        this.warmer = warmer;
        this.similarityService = similarityService;
        Objects.requireNonNull(store, "Store must be provided to the index shard");
        this.engineFactory = Objects.requireNonNull(engineFactory);
        this.engineConfigFactory = Objects.requireNonNull(engineConfigFactory);
        this.store = store;
        this.indexSortSupplier = indexSortSupplier;
        this.indexEventListener = indexEventListener;
        this.threadPool = threadPool;
        // NOTE(review): translogSyncProcessor (field) and createTranslogSyncProcessor
        // are declared elsewhere in this class; presumably the processor batches
        // translog sync requests for the engine -- confirm against their definitions.
        this.translogSyncProcessor = createTranslogSyncProcessor(
            logger,
            threadPool,
            this::getEngine,
            indexSettings.isAssignedOnRemoteNode(),
            () -> getRemoteTranslogUploadBufferInterval(remoteStoreSettings::getClusterRemoteTranslogBufferInterval)
        );
        this.mapperService = mapperService;
        this.indexCache = indexCache;
        this.internalIndexingStats = new InternalIndexingStats();
        // internal stats collector always participates in the indexing listener chain
        final List<IndexingOperationListener> listenersList = new ArrayList<>(listeners);
        listenersList.add(internalIndexingStats);
        this.indexingOperationListeners = new IndexingOperationListener.CompositeListener(listenersList, logger);
        this.globalCheckpointSyncer = globalCheckpointSyncer;
        this.retentionLeaseSyncer = Objects.requireNonNull(retentionLeaseSyncer);
        // likewise, the shard-level search stats collector is always appended
        final List<SearchOperationListener> searchListenersList = new ArrayList<>(searchOperationListener);
        searchListenersList.add(searchStats);
        this.searchOperationListener = new SearchOperationListener.CompositeListener(searchListenersList, logger);
        this.getService = new ShardGetService(indexSettings, this, mapperService);
        this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
        this.requestCacheStats = new ShardRequestCache();
        this.shardFieldData = new ShardFieldData();
        this.shardBitsetFilterCache = new ShardBitsetFilterCache(shardId, indexSettings);
        state = IndexShardState.CREATED;
        this.path = path;
        this.circuitBreakerService = circuitBreakerService;
        /* create engine config */
        logger.debug("state: [CREATED]");
        this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP);
        this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays, nodeId, seedRemote);
        final String aId = shardRouting.allocationId().getId();
        final long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardId.id());
        this.pendingPrimaryTerm = primaryTerm;
        this.globalCheckpointListeners = new GlobalCheckpointListeners(shardId, threadPool.scheduler(), logger);
        this.pendingReplicationActions = new PendingReplicationActions(shardId, threadPool);
        // NOTE(review): isShardOnRemoteEnabledNode is a member declared elsewhere in
        // this class (outside this view) -- confirm its semantics there.
        this.replicationTracker = new ReplicationTracker(
            shardId,
            aId,
            indexSettings,
            primaryTerm,
            UNASSIGNED_SEQ_NO,
            globalCheckpointListeners::globalCheckpointUpdated,
            threadPool::absoluteTimeInMillis,
            (retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, aId, getPendingPrimaryTerm(), retentionLeases, listener),
            this::getSafeCommitInfo,
            pendingReplicationActions,
            isShardOnRemoteEnabledNode
        );
        // the query cache is a node-level thing, however we want the most popular filters
        // to be computed on a per-shard basis
        if (IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.get(settings)) {
            // cache everything: no-op usage tracking, shouldCache is unconditionally true
            cachingPolicy = new QueryCachingPolicy() {
                @Override
                public void onUse(Query query) {
                }
                @Override
                public boolean shouldCache(Query query) {
                    return true;
                }
            };
        } else {
            cachingPolicy = new UsageTrackingQueryCachingPolicy();
        }
        indexShardOperationPermits = new IndexShardOperationPermits(shardId, threadPool);
        readerWrapper = indexReaderWrapper;
        refreshListeners = buildRefreshListeners();
        lastSearcherAccess.set(threadPool.relativeTimeInMillis());
        persistMetadata(path, indexSettings, shardRouting, null, logger);
        this.useRetentionLeasesInPeerRecovery = replicationTracker.hasAllPeerRecoveryRetentionLeases();
        this.refreshPendingLocationListener = new RefreshPendingLocationListener();
        this.checkpointPublisher = checkpointPublisher;
        this.remoteStore = remoteStore;
        this.translogFactorySupplier = translogFactorySupplier;
        // a "time series" index here means the mappings contain a timestamp field
        this.isTimeSeriesIndex = (mapperService == null || mapperService.documentMapper() == null)
            ? false
            : mapperService.documentMapper().mappers().containsTimeStampField();
        this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory;
        this.recoverySettings = recoverySettings;
        this.remoteStoreSettings = remoteStoreSettings;
        this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings);
        this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote);
        this.discoveryNodes = discoveryNodes;
    }
/** Returns the node-level {@link ThreadPool} this shard schedules work on. */
public ThreadPool getThreadPool() {
    return this.threadPool;
}
/** Returns the local {@link Store} backing this shard's Lucene index. */
public Store store() {
    return this.store;
}
/** Returns {@code true} if this shard is currently migrating from docrep to the remote store. */
public boolean isMigratingToRemote() {
    // set it true only if shard is remote, but index setting doesn't say so
    return shardMigrationState == REMOTE_MIGRATING_UNSEEDED || shardMigrationState == REMOTE_MIGRATING_SEEDED;
}
/** Returns {@code true} if the remote store still needs to be seeded for this migrating shard. */
public boolean shouldSeedRemoteStore() {
    // set it true only if relocating from docrep to remote store
    return shardMigrationState == REMOTE_MIGRATING_UNSEEDED;
}
/**
 * To be delegated to {@link ReplicationTracker} so that relevant remote store based
 * operations can be ignored during engine migration
 * <p>
 * Has explicit null checks to ensure that the {@link ReplicationTracker#invariant()}
 * checks do not fail during a cluster manager state update when the latest replication group
 * calculation is not yet done and only the cached replication group details are available.
 * Returns {@code false} when the node id is unknown to the current {@link DiscoveryNodes}.
 */
public Function<String, Boolean> isShardOnRemoteEnabledNode = nodeId -> {
    DiscoveryNode node = discoveryNodes.get(nodeId);
    if (node != null) {
        logger.trace("Node {} has remote_enabled as {}", nodeId, node.isRemoteStoreNode());
        return node.isRemoteStoreNode();
    }
    return false;
};
/** Returns {@code true} if the remote store has already been seeded during docrep-to-remote migration. */
public boolean isRemoteSeeded() {
    return shardMigrationState == REMOTE_MIGRATING_SEEDED;
}
/** Returns the remote {@link Store} for this shard, or {@code null} if the shard is not remote-backed. */
public Store remoteStore() {
    return this.remoteStore;
}
/**
 * Return the sort order of this index, or null if the index has no sort.
 */
public Sort getIndexSort() {
    return indexSortSupplier.get();
}
/** Returns the {@link ShardGetService} used to serve real-time GETs from this shard. */
public ShardGetService getService() {
    return this.getService;
}
/** Returns the per-shard bitset filter cache. */
public ShardBitsetFilterCache shardBitsetFilterCache() {
    return shardBitsetFilterCache;
}
/** Returns the {@link MapperService} for this shard's index; may be {@code null} for shards without mappings. */
public MapperService mapperService() {
    return mapperService;
}
/** Returns the composite {@link SearchOperationListener} notified around search phases on this shard. */
public SearchOperationListener getSearchOperationListener() {
    return this.searchOperationListener;
}
/** Returns the index warmer service for this shard. */
public ShardIndexWarmerService warmerService() {
    return this.shardWarmerService;
}
/** Returns the request-cache statistics holder for this shard. */
public ShardRequestCache requestCache() {
    return this.requestCacheStats;
}
/** Returns the field-data statistics holder for this shard. */
public ShardFieldData fieldData() {
    return this.shardFieldData;
}
/** Returns {@code true} if this shard belongs to a system index. */
public boolean isSystem() {
    return indexSettings.getIndexMetadata().isSystem();
}
/**
 * Returns the name of the default codec in codecService
 */
public String getDefaultCodecName() {
    return codecService.codec(CodecService.DEFAULT_CODEC).getName();
}
/**
 * USE THIS METHOD WITH CARE!
 * Returns the primary term the index shard is supposed to be on. In case of primary promotion or when a replica learns about
 * a new term due to a new primary, the term that's exposed here will not be the term that the shard internally uses to assign
 * to operations. The shard will auto-correct its internal operation term, but this might take time.
 * See {@link org.opensearch.cluster.metadata.IndexMetadata#primaryTerm(int)}
 */
public long getPendingPrimaryTerm() {
    return this.pendingPrimaryTerm;
}
/** Returns the primary term that is currently being used to assign to operations */
public long getOperationPrimaryTerm() {
    return replicationTracker.getOperationPrimaryTerm();
}
/**
 * Returns the latest cluster routing entry received with this shard.
 */
@Override
public ShardRouting routingEntry() {
    return this.shardRouting;
}
/** Returns the query-caching policy chosen at construction (cache-everything or usage-tracking). */
public QueryCachingPolicy getQueryCachingPolicy() {
    return cachingPolicy;
}
/** Only used for testing **/
protected RemoteStoreStatsTrackerFactory getRemoteStoreStatsTrackerFactory() {
    return remoteStoreStatsTrackerFactory;
}
/** Returns the id of the node this shard lives on, as recorded in the translog config. */
public String getNodeId() {
    return translogConfig.getNodeId();
}
/** Returns the node-level recovery settings. */
public RecoverySettings getRecoverySettings() {
    return recoverySettings;
}
/** Returns the node-level remote store settings. */
public RemoteStoreSettings getRemoteStoreSettings() {
    return remoteStoreSettings;
}
/** Returns the downloader used to fetch segment files from the remote store. */
public RemoteStoreFileDownloader getFileDownloader() {
    return fileDownloader;
}
/**
 * Applies a cluster-state-driven routing/term update to this shard: validates the new routing entry,
 * transitions POST_RECOVERY shards to STARTED, activates primary mode (possibly bumping the primary
 * term and triggering a primary-replica resync on promotion), and finally publishes the new routing.
 * Fix: corrected the "cluster-managerr" typo in the relocation assertion message.
 */
@Override
public void updateShardState(
    final ShardRouting newRouting,
    final long newPrimaryTerm,
    final BiConsumer<IndexShard, ActionListener<ResyncTask>> primaryReplicaSyncer,
    final long applyingClusterStateVersion,
    final Set<String> inSyncAllocationIds,
    final IndexShardRoutingTable routingTable,
    DiscoveryNodes discoveryNodes
) throws IOException {
    this.discoveryNodes = discoveryNodes;
    final ShardRouting currentRouting;
    synchronized (mutex) {
        currentRouting = this.shardRouting;
        assert currentRouting != null;
        // Reject routing entries that do not belong to this shard or to this allocation.
        if (!newRouting.shardId().equals(shardId())) {
            throw new IllegalArgumentException(
                "Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId()
            );
        }
        if (newRouting.isSameAllocation(currentRouting) == false) {
            throw new IllegalArgumentException(
                "Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting
            );
        }
        if (currentRouting.primary() && newRouting.primary() == false) {
            throw new IllegalArgumentException(
                "illegal state: trying to move shard from primary mode to replica mode. Current "
                    + currentRouting
                    + ", new "
                    + newRouting
            );
        }
        if (newRouting.primary()) {
            replicationTracker.updateFromClusterManager(applyingClusterStateVersion, inSyncAllocationIds, routingTable);
        }
        if (state == IndexShardState.POST_RECOVERY && newRouting.active()) {
            assert currentRouting.active() == false : "we are in POST_RECOVERY, but our shard routing is active " + currentRouting;
            assert currentRouting.isRelocationTarget() == false
                || currentRouting.primary() == false
                || replicationTracker.isPrimaryMode()
                : "a primary relocation is completed by the cluster-manager, but primary mode is not active " + currentRouting;
            changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
            // Flush here after relocation of primary, so that replica get all changes from new primary rather than waiting for more
            // docs to get indexed.
            if (indexSettings.isSegRepEnabledOrRemoteNode()) {
                flush(new FlushRequest().waitIfOngoing(true).force(true));
            }
        } else if (currentRouting.primary()
            && currentRouting.relocating()
            && replicationTracker.isRelocated()
            && (newRouting.relocating() == false || newRouting.equalsIgnoringMetadata(currentRouting) == false)) {
            // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard
            // routing occur (e.g. due to recovery failure / cancellation). The reason is that at the moment we cannot safely
            // reactivate primary mode without risking two active primaries.
            throw new IndexShardRelocatedException(
                shardId(),
                "Shard is marked as relocated, cannot safely move to state " + newRouting.state()
            );
        }
        assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED
            : "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state;
        persistMetadata(path, indexSettings, newRouting, currentRouting, logger);
        // Latch that delays operations unblocked by bumpPrimaryTerm until shardRouting has been published below.
        final CountDownLatch shardStateUpdated = new CountDownLatch(1);
        if (newRouting.primary()) {
            if (newPrimaryTerm == pendingPrimaryTerm) {
                if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) {
                    // the cluster-manager started a recovering primary, activate primary mode.
                    replicationTracker.activatePrimaryMode(getLocalCheckpoint());
                    postActivatePrimaryMode();
                }
            } else {
                assert currentRouting.primary() == false : "term is only increased as part of primary promotion";
                /* Note that due to cluster state batching an initializing primary shard term can fail and be re-assigned
                 * in one state causing its term to be incremented. Note that if both current shard state and new
                 * shard state are initializing, we could replace the current shard and reinitialize it. It is however
                 * possible that this shard is being started. This can happen if:
                 * 1) Shard is post recovery and sends shard started to the cluster-manager
                 * 2) Node gets disconnected and rejoins
                 * 3) Cluster-manager assigns the shard back to the node
                 * 4) Cluster-manager processes the shard started and starts the shard
                 * 5) The node process the cluster state where the shard is both started and primary term is incremented.
                 *
                 * We could fail the shard in that case, but this will cause it to be removed from the insync allocations list
                 * potentially preventing re-allocation.
                 */
                assert newRouting.initializing() == false : "a started primary shard should never update its term; "
                    + "shard "
                    + newRouting
                    + ", "
                    + "current term ["
                    + pendingPrimaryTerm
                    + "], "
                    + "new term ["
                    + newPrimaryTerm
                    + "]";
                assert newPrimaryTerm > pendingPrimaryTerm : "primary terms can only go up; current term ["
                    + pendingPrimaryTerm
                    + "], new term ["
                    + newPrimaryTerm
                    + "]";
                /*
                 * Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we
                 * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary
                 * term is incremented.
                 */
                // to prevent primary relocation handoff while resync is not completed
                boolean resyncStarted = primaryReplicaResyncInProgress.compareAndSet(false, true);
                if (resyncStarted == false) {
                    throw new IllegalStateException("cannot start resync while it's already in progress");
                }
                bumpPrimaryTerm(newPrimaryTerm, () -> {
                    shardStateUpdated.await();
                    assert pendingPrimaryTerm == newPrimaryTerm : "shard term changed on primary. expected ["
                        + newPrimaryTerm
                        + "] but was ["
                        + pendingPrimaryTerm
                        + "]"
                        + ", current routing: "
                        + currentRouting
                        + ", new routing: "
                        + newRouting;
                    assert getOperationPrimaryTerm() == newPrimaryTerm;
                    try {
                        if (indexSettings.isSegRepEnabledOrRemoteNode()) {
                            // this Shard's engine was read only, we need to update its engine before restoring local history from
                            // the translog.
                            assert newRouting.primary() && currentRouting.primary() == false;
                            ReplicationTimer timer = new ReplicationTimer();
                            timer.start();
                            logger.debug(
                                "Resetting engine on promotion of shard [{}] to primary, startTime {}\n",
                                shardId,
                                timer.startTime()
                            );
                            resetEngineToGlobalCheckpoint();
                            timer.stop();
                            logger.info("Completed engine failover for shard [{}] in: {} ms", shardId, timer.time());
                            // It is possible an engine can open with a SegmentInfos on a higher gen but the reader does not refresh to
                            // trigger our refresh listener.
                            // Force update the checkpoint post engine reset.
                            updateReplicationCheckpoint();
                        }
                        replicationTracker.activatePrimaryMode(getLocalCheckpoint());
                        if (indexSettings.isSegRepEnabledOrRemoteNode()) {
                            // force publish a checkpoint once in primary mode so that replicas not caught up to previous primary
                            // are brought up to date.
                            checkpointPublisher.publish(this, getLatestReplicationCheckpoint());
                        }
                        postActivatePrimaryMode();
                        /*
                         * If this shard was serving as a replica shard when another shard was promoted to primary then
                         * its Lucene index was reset during the primary term transition. In particular, the Lucene index
                         * on this shard was reset to the global checkpoint and the operations above the local checkpoint
                         * were reverted. If the other shard that was promoted to primary subsequently fails before the
                         * primary/replica re-sync completes successfully and we are now being promoted, we have to restore
                         * the reverted operations on this shard by replaying the translog to avoid losing acknowledged writes.
                         */
                        final Engine engine = getEngine();
                        engine.translogManager()
                            .restoreLocalHistoryFromTranslog(
                                engine.getProcessedLocalCheckpoint(),
                                (snapshot) -> runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {})
                            );
                        /* Rolling the translog generation is not strictly needed here (as we will never have collisions between
                         * sequence numbers in a translog generation in a new primary as it takes the last known sequence number
                         * as a starting point), but it simplifies reasoning about the relationship between primary terms and
                         * translog generations.
                         */
                        engine.translogManager().rollTranslogGeneration();
                        engine.fillSeqNoGaps(newPrimaryTerm);
                        replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), getLocalCheckpoint());
                        primaryReplicaSyncer.accept(this, new ActionListener<ResyncTask>() {
                            @Override
                            public void onResponse(ResyncTask resyncTask) {
                                logger.info("primary-replica resync completed with {} operations", resyncTask.getResyncedOperations());
                                boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false);
                                assert resyncCompleted : "primary-replica resync finished but was not started";
                            }

                            @Override
                            public void onFailure(Exception e) {
                                boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false);
                                assert resyncCompleted : "primary-replica resync finished but was not started";
                                if (state == IndexShardState.CLOSED) {
                                    // ignore, shutting down
                                } else {
                                    failShard("exception during primary-replica resync", e);
                                }
                            }
                        });
                    } catch (final AlreadyClosedException e) {
                        // okay, the index was deleted
                    }
                }, null);
            }
        }
        // set this last, once we finished updating all internal state.
        this.shardRouting = newRouting;
        assert this.shardRouting.primary() == false || this.shardRouting.started() == false || // note that we use started and not
        // active to avoid relocating shards
            this.indexShardOperationPermits.isBlocked() || // if permits are blocked, we are still transitioning
            this.replicationTracker.isPrimaryMode() : "a started primary with non-pending operation term must be in primary mode "
                + this.shardRouting;
        shardStateUpdated.countDown();
    }
    if (currentRouting.active() == false && newRouting.active()) {
        indexEventListener.afterIndexShardStarted(this);
    }
    if (newRouting.equals(currentRouting) == false) {
        indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
    }
    if (indexSettings.isSoftDeleteEnabled() && useRetentionLeasesInPeerRecovery == false && state() == IndexShardState.STARTED) {
        final RetentionLeases retentionLeases = replicationTracker.getRetentionLeases();
        final Set<ShardRouting> shardRoutings = new HashSet<>(routingTable.getShards());
        shardRoutings.addAll(routingTable.assignedShards()); // include relocation targets
        if (shardRoutings.stream()
            .allMatch(
                shr -> shr.assignedToNode() && retentionLeases.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shr))
            )) {
            useRetentionLeasesInPeerRecovery = true;
            turnOffTranslogRetention();
        }
    }
}
/**
 * Marks the shard as recovering based on the given recovery state.
 *
 * @param reason        the reason for entering recovery, used for logging/state-change notification
 * @param recoveryState the recovery state to install on this shard
 * @return the previous shard state
 * @throws IndexShardClosedException     if the shard is already closed
 * @throws IndexShardStartedException    if the shard has already started
 * @throws IndexShardRecoveringException if the shard is already recovering (or post-recovery)
 */
public IndexShardState markAsRecovering(String reason, RecoveryState recoveryState) throws IndexShardStartedException,
    IndexShardRelocatedException, IndexShardRecoveringException, IndexShardClosedException {
    synchronized (mutex) {
        if (state == IndexShardState.CLOSED) {
            throw new IndexShardClosedException(shardId);
        }
        if (state == IndexShardState.STARTED) {
            throw new IndexShardStartedException(shardId);
        }
        // RECOVERING and POST_RECOVERY are both reported as "already recovering".
        if (state == IndexShardState.RECOVERING || state == IndexShardState.POST_RECOVERY) {
            throw new IndexShardRecoveringException(shardId);
        }
        this.recoveryState = recoveryState;
        return changeState(IndexShardState.RECOVERING, reason);
    }
}
// Guards against concurrent primary-replica resyncs and forbids relocation handoff while a resync is running.
private final AtomicBoolean primaryReplicaResyncInProgress = new AtomicBoolean();
/**
 * Completes the relocation. Operations are blocked and current operations are drained before changing state to
 * relocated. After all operations are successfully blocked, performSegRep is executed followed by target relocation
 * handoff.
 *
 * @param targetAllocationId the allocation id of the relocation target, validated against the replication group
 * @param consumer a {@link Consumer} that receives the primary context after performSegRep; expected to hand it off
 *                 to the relocation target (typically via a network call)
 * @param performSegRep a {@link Runnable} that is executed after operations are blocked
 * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation
 * @throws IllegalStateException if the relocation target is no longer part of the replication group
 * @throws InterruptedException if blocking operations is interrupted
 */
public void relocated(
    final String targetAllocationId,
    final Consumer<ReplicationTracker.PrimaryContext> consumer,
    final Runnable performSegRep
) throws IllegalIndexShardStateException, IllegalStateException, InterruptedException {
    assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting;
    // The below list of releasable ensures that if the relocation does not happen, we undo the activity of close and
    // acquire all permits. This will ensure that the remote store uploads can still be done by the existing primary shard.
    List<Releasable> releasablesOnHandoffFailures = new ArrayList<>(2);
    try (Releasable forceRefreshes = refreshListeners.forceRefreshes()) {
        // Block all new operations for up to 30 minutes and drain in-flight ones before the handoff.
        indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
            forceRefreshes.close();
            boolean syncTranslog = (isRemoteTranslogEnabled() || this.isMigratingToRemote())
                && Durability.ASYNC == indexSettings.getTranslogDurability();
            // Since all the index permits are acquired at this point, the translog buffer will not change.
            // It is safe to perform sync of translogs now as this will ensure for remote-backed indexes, the
            // translogs has been uploaded to the remote store.
            if (syncTranslog) {
                maybeSync();
            }
            // Ensures all in-flight remote store refreshes drain, before we perform the performSegRep.
            for (ReferenceManager.RefreshListener refreshListener : internalRefreshListener) {
                if (refreshListener instanceof ReleasableRetryableRefreshListener) {
                    releasablesOnHandoffFailures.add(((ReleasableRetryableRefreshListener) refreshListener).drainRefreshes());
                }
            }
            // Ensure all in-flight remote store translog upload drains, before we perform the performSegRep.
            releasablesOnHandoffFailures.add(getEngine().translogManager().drainSync());
            // no shard operation permits are being held here, move state from started to relocated
            assert indexShardOperationPermits.getActiveOperationsCount() == OPERATIONS_BLOCKED
                : "in-flight operations in progress while moving shard state to relocated";
            performSegRep.run();
            /*
             * We should not invoke the runnable under the mutex as the expected implementation is to handoff the primary context via a
             * network operation. Doing this under the mutex can implicitly block the cluster state update thread on network operations.
             */
            verifyRelocatingState();
            final ReplicationTracker.PrimaryContext primaryContext = replicationTracker.startRelocationHandoff(targetAllocationId);
            try {
                consumer.accept(primaryContext);
                synchronized (mutex) {
                    verifyRelocatingState();
                    replicationTracker.completeRelocationHandoff(); // make changes to primaryMode and relocated flag only under
                    // mutex
                }
            } catch (final Exception e) {
                // Handoff failed after it started: abort so this shard can resume acting as primary.
                try {
                    replicationTracker.abortRelocationHandoff();
                } catch (final Exception inner) {
                    e.addSuppressed(inner);
                }
                throw e;
            }
        });
    } catch (TimeoutException e) {
        logger.warn("timed out waiting for relocation hand-off to complete");
        // This is really bad as ongoing replication operations are preventing this shard from completing relocation hand-off.
        // Fail primary relocation source and target shards.
        failShard("timed out waiting for relocation hand-off to complete", null);
        throw new IndexShardClosedException(shardId(), "timed out waiting for relocation hand-off to complete");
    } catch (Exception ex) {
        assert replicationTracker.isPrimaryMode();
        // If the primary mode is still true after the end of handoff attempt, it basically means that the relocation
        // failed. The existing primary will continue to be the primary, so we need to allow the segments and translog
        // upload to resume.
        Releasables.close(releasablesOnHandoffFailures);
        throw ex;
    }
}
/** Syncs the translog if needed, logging (rather than propagating) any I/O failure. */
private void maybeSync() {
    try {
        if (isSyncNeeded() == false) {
            return;
        }
        sync();
    } catch (IOException e) {
        logger.warn("failed to sync translog", e);
    }
}
/**
 * Verifies that this shard may proceed with a primary relocation handoff: it must be STARTED,
 * its routing entry must still be relocating, and no primary-replica resync may be in progress.
 *
 * @throws IndexShardNotStartedException   if the shard is not in the STARTED state
 * @throws IllegalIndexShardStateException if the shard is no longer relocating or a resync is running
 */
private void verifyRelocatingState() {
    if (state != IndexShardState.STARTED) {
        throw new IndexShardNotStartedException(shardId, state);
    }
    /*
     * If the cluster-manager cancelled recovery, the target will be removed and the recovery will be cancelled. However, it is still possible
     * that we concurrently end up here and therefore have to protect that we do not mark the shard as relocated when its shard routing
     * says otherwise.
     */
    if (shardRouting.relocating() == false) {
        throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED, ": shard is no longer relocating " + shardRouting);
    }
    // Relocation handoff must not race with an ongoing primary-replica resync.
    if (primaryReplicaResyncInProgress.get()) {
        throw new IllegalIndexShardStateException(
            shardId,
            IndexShardState.STARTED,
            ": primary relocation is forbidden while primary-replica resync is in progress " + shardRouting
        );
    }
}
/** Returns the current lifecycle state of this shard. */
@Override
public IndexShardState state() {
    return state;
}
/**
 * Transitions this shard to a new lifecycle state and notifies the index event listener.
 * Must be invoked while holding {@code mutex}.
 *
 * @param newState the state to transition to
 * @param reason   the reason for the state change (logged and passed to listeners)
 * @return the state the shard was in before the transition
 */
private IndexShardState changeState(IndexShardState newState, String reason) {
    assert Thread.holdsLock(mutex);
    logger.debug("state: [{}]->[{}], reason [{}]", state, newState, reason);
    final IndexShardState oldState = state;
    state = newState;
    this.indexEventListener.indexShardStateChanged(this, oldState, newState, reason);
    return oldState;
}
/**
 * Applies an index operation on the primary: sequence number is assigned by the engine
 * (UNASSIGNED_SEQ_NO here) and the current operation primary term is used.
 *
 * @param version                the explicit version to use, interpreted per {@code versionType}
 * @param versionType            how {@code version} is validated/applied; must allow writes
 * @param sourceToParse          the source document to parse and index
 * @param ifSeqNo                compare-and-set sequence number, or UNASSIGNED_SEQ_NO
 * @param ifPrimaryTerm          compare-and-set primary term accompanying {@code ifSeqNo}
 * @param autoGeneratedTimestamp timestamp for auto-generated-id optimization
 * @param isRetry                whether this is a retry of a previously sent request
 */
public Engine.IndexResult applyIndexOperationOnPrimary(
    long version,
    VersionType versionType,
    SourceToParse sourceToParse,
    long ifSeqNo,
    long ifPrimaryTerm,
    long autoGeneratedTimestamp,
    boolean isRetry
) throws IOException {
    assert versionType.validateVersionForWrites(version);
    return applyIndexOperation(
        getEngine(),
        UNASSIGNED_SEQ_NO,
        getOperationPrimaryTerm(),
        version,
        versionType,
        ifSeqNo,
        ifPrimaryTerm,
        autoGeneratedTimestamp,
        isRetry,
        Engine.Operation.Origin.PRIMARY,
        sourceToParse,
        null
    );
}
/**
 * Applies an index operation on a replica using the sequence number and primary term
 * assigned by the primary. No version type or compare-and-set is used on replicas.
 */
public Engine.IndexResult applyIndexOperationOnReplica(
    String id,
    long seqNo,
    long opPrimaryTerm,
    long version,
    long autoGeneratedTimeStamp,
    boolean isRetry,
    SourceToParse sourceToParse
) throws IOException {
    return applyIndexOperation(
        getEngine(),
        seqNo,
        opPrimaryTerm,
        version,
        null,
        UNASSIGNED_SEQ_NO,
        0,
        autoGeneratedTimeStamp,
        isRetry,
        Engine.Operation.Origin.REPLICA,
        sourceToParse,
        id
    );
}
/**
 * Shared path for primary and replica index operations: parses the source (unless the shard is a
 * segment-replication replica), applies dynamic mapping updates, and hands the operation to the engine.
 * Parsing/mapping failures are reported as a document-level {@link Engine.IndexResult} rather than
 * thrown, so replication of earlier operations is not blocked.
 *
 * @param id document id; non-null only for replica operations (primaries derive it from the parsed doc)
 */
private Engine.IndexResult applyIndexOperation(
    Engine engine,
    long seqNo,
    long opPrimaryTerm,
    long version,
    @Nullable VersionType versionType,
    long ifSeqNo,
    long ifPrimaryTerm,
    long autoGeneratedTimeStamp,
    boolean isRetry,
    Engine.Operation.Origin origin,
    SourceToParse sourceToParse,
    String id
) throws IOException {
    // For Segment Replication enabled replica shards we can skip parsing the documents as we directly copy segments from primary
    // shard.
    if (indexSettings.isSegRepEnabledOrRemoteNode() && routingEntry().primary() == false) {
        // ParsedDocument carries only the raw source; fields are never indexed locally on a segrep replica.
        Engine.Index index = new Engine.Index(
            new Term(IdFieldMapper.NAME, Uid.encodeId(id)),
            new ParsedDocument(null, null, id, null, null, sourceToParse.source(), sourceToParse.getMediaType(), null),
            seqNo,
            opPrimaryTerm,
            version,
            null,
            Engine.Operation.Origin.REPLICA,
            System.nanoTime(),
            autoGeneratedTimeStamp,
            isRetry,
            UNASSIGNED_SEQ_NO,
            0
        );
        return getEngine().index(index);
    }
    assert opPrimaryTerm <= getOperationPrimaryTerm() : "op term [ "
        + opPrimaryTerm
        + " ] > shard term ["
        + getOperationPrimaryTerm()
        + "]";
    ensureWriteAllowed(origin);
    Engine.Index operation;
    try {
        operation = prepareIndex(
            docMapper(),
            sourceToParse,
            seqNo,
            opPrimaryTerm,
            version,
            versionType,
            origin,
            autoGeneratedTimeStamp,
            isRetry,
            ifSeqNo,
            ifPrimaryTerm
        );
        // A required mapping update must be applied by the caller before the operation can proceed.
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        if (update != null) {
            return new Engine.IndexResult(update);
        }
    } catch (Exception e) {
        // We treat any exception during parsing and or mapping update as a document level failure
        // with the exception side effects of closing the shard. Since we don't have the shard, we
        // can not raise an exception that may block any replication of previous operations to the
        // replicas
        verifyNotClosed(e);
        return new Engine.IndexResult(e, version, opPrimaryTerm, seqNo);
    }
    return index(engine, operation);
}
/**
 * Parses the given source into an {@link Engine.Index} operation, attaching any dynamic
 * mapping update produced by the mapper so the caller can apply it before indexing.
 */
public static Engine.Index prepareIndex(
    DocumentMapperForType docMapper,
    SourceToParse source,
    long seqNo,
    long primaryTerm,
    long version,
    VersionType versionType,
    Engine.Operation.Origin origin,
    long autoGeneratedIdTimestamp,
    boolean isRetry,
    long ifSeqNo,
    long ifPrimaryTerm
) {
    final long startNanos = System.nanoTime();
    final ParsedDocument parsedDoc = docMapper.getDocumentMapper().parse(source);
    final Mapping mappingUpdate = docMapper.getMapping();
    if (mappingUpdate != null) {
        parsedDoc.addDynamicMappingsUpdate(mappingUpdate);
    }
    final Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(parsedDoc.id()));
    return new Engine.Index(
        uidTerm,
        parsedDoc,
        seqNo,
        primaryTerm,
        version,
        versionType,
        origin,
        startNanos,
        autoGeneratedIdTimestamp,
        isRetry,
        ifSeqNo,
        ifPrimaryTerm
    );
}
/**
 * Executes an index operation on the engine, notifying the indexing operation listeners
 * before and after (on both success and failure), and marking the shard active.
 */
private Engine.IndexResult index(Engine engine, Engine.Index index) throws IOException {
    active.set(true);
    final Engine.IndexResult result;
    // pre/post listener calls must bracket the engine call, including the failure path below.
    index = indexingOperationListeners.preIndex(shardId, index);
    try {
        if (logger.isTraceEnabled()) {
            // don't use index.source().utf8ToString() here source might not be valid UTF-8
            logger.trace(
                "index [{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}]",
                index.id(),
                index.seqNo(),
                routingEntry().allocationId(),
                index.primaryTerm(),
                getOperationPrimaryTerm(),
                index.origin()
            );
        }
        result = engine.index(index);
        if (logger.isTraceEnabled()) {
            logger.trace(
                "index-done [{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}] "
                    + "result-seq# [{}] result-term [{}] failure [{}]",
                index.id(),
                index.seqNo(),
                routingEntry().allocationId(),
                index.primaryTerm(),
                getOperationPrimaryTerm(),
                index.origin(),
                result.getSeqNo(),
                result.getTerm(),
                result.getFailure()
            );
        }
    } catch (Exception e) {
        if (logger.isTraceEnabled()) {
            logger.trace(
                new ParameterizedMessage(
                    "index-fail [{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}]",
                    index.id(),
                    index.seqNo(),
                    routingEntry().allocationId(),
                    index.primaryTerm(),
                    getOperationPrimaryTerm(),
                    index.origin()
                ),
                e
            );
        }
        indexingOperationListeners.postIndex(shardId, index, e);
        throw e;
    }
    indexingOperationListeners.postIndex(shardId, index, result);
    return result;
}
/** Marks the given sequence number as a no-op on this shard, with replica origin. */
public Engine.NoOpResult markSeqNoAsNoop(long seqNo, long opPrimaryTerm, String reason) throws IOException {
    return markSeqNoAsNoop(getEngine(), seqNo, opPrimaryTerm, reason, Engine.Operation.Origin.REPLICA);
}
/**
 * Records a no-op for the given sequence number on the engine, after verifying the
 * operation's primary term does not exceed this shard's and that writes are allowed.
 */
private Engine.NoOpResult markSeqNoAsNoop(Engine engine, long seqNo, long opPrimaryTerm, String reason, Engine.Operation.Origin origin)
    throws IOException {
    assert opPrimaryTerm <= getOperationPrimaryTerm() : "op term [ "
        + opPrimaryTerm
        + " ] > shard term ["
        + getOperationPrimaryTerm()
        + "]";
    long startTime = System.nanoTime();
    ensureWriteAllowed(origin);
    final Engine.NoOp noOp = new Engine.NoOp(seqNo, opPrimaryTerm, origin, startTime, reason);
    return noOp(engine, noOp);
}
/** Executes a no-op on the engine and marks the shard active. */
private Engine.NoOpResult noOp(Engine engine, Engine.NoOp noOp) throws IOException {
    active.set(true);
    if (logger.isTraceEnabled()) {
        logger.trace("noop (seq# [{}])", noOp.seqNo());
    }
    return engine.noOp(noOp);
}
/** Wraps an exception as a failed index result for the given version. */
public Engine.IndexResult getFailedIndexResult(Exception e, long version) {
    return new Engine.IndexResult(e, version);
}
/** Wraps an exception as a failed delete result, stamped with the current operation primary term. */
public Engine.DeleteResult getFailedDeleteResult(Exception e, long version) {
    return new Engine.DeleteResult(e, version, getOperationPrimaryTerm());
}
/**
 * Applies a delete operation on the primary: sequence number is assigned by the engine
 * and the current operation primary term is used.
 */
public Engine.DeleteResult applyDeleteOperationOnPrimary(
    long version,
    String id,
    VersionType versionType,
    long ifSeqNo,
    long ifPrimaryTerm
) throws IOException {
    assert versionType.validateVersionForWrites(version);
    return applyDeleteOperation(
        getEngine(),
        UNASSIGNED_SEQ_NO,
        getOperationPrimaryTerm(),
        version,
        id,
        versionType,
        ifSeqNo,
        ifPrimaryTerm,
        Engine.Operation.Origin.PRIMARY
    );
}
/**
 * Applies a delete on a replica using the primary-assigned sequence number and term.
 * Segment-replication/remote shards take a short-circuit path that builds the delete directly.
 */
public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo, long opPrimaryTerm, long version, String id) throws IOException {
    if (indexSettings.isSegRepEnabledOrRemoteNode()) {
        final Engine.Delete delete = new Engine.Delete(
            id,
            new Term(IdFieldMapper.NAME, Uid.encodeId(id)),
            seqNo,
            opPrimaryTerm,
            version,
            null,
            Engine.Operation.Origin.REPLICA,
            System.nanoTime(),
            UNASSIGNED_SEQ_NO,
            0
        );
        return getEngine().delete(delete);
    }
    return applyDeleteOperation(
        getEngine(),
        seqNo,
        opPrimaryTerm,
        version,
        id,
        null,
        UNASSIGNED_SEQ_NO,
        0,
        Engine.Operation.Origin.REPLICA
    );
}
/**
 * Shared path for primary and replica deletes: verifies the operation term, checks that
 * writes are allowed for the origin, then prepares and executes the delete on the engine.
 */
private Engine.DeleteResult applyDeleteOperation(
    Engine engine,
    long seqNo,
    long opPrimaryTerm,
    long version,
    String id,
    @Nullable VersionType versionType,
    long ifSeqNo,
    long ifPrimaryTerm,
    Engine.Operation.Origin origin
) throws IOException {
    assert opPrimaryTerm <= getOperationPrimaryTerm() : "op term [ "
        + opPrimaryTerm
        + " ] > shard term ["
        + getOperationPrimaryTerm()
        + "]";
    ensureWriteAllowed(origin);
    final Engine.Delete delete = prepareDelete(id, seqNo, opPrimaryTerm, version, versionType, origin, ifSeqNo, ifPrimaryTerm);
    return delete(engine, delete);
}
/** Builds an {@link Engine.Delete} operation for the given document id and metadata. */
public static Engine.Delete prepareDelete(
    String id,
    long seqNo,
    long primaryTerm,
    long version,
    VersionType versionType,
    Engine.Operation.Origin origin,
    long ifSeqNo,
    long ifPrimaryTerm
) {
    final long startNanos = System.nanoTime();
    return new Engine.Delete(
        id,
        new Term(IdFieldMapper.NAME, Uid.encodeId(id)),
        seqNo,
        primaryTerm,
        version,
        versionType,
        origin,
        startNanos,
        ifSeqNo,
        ifPrimaryTerm
    );
}
/**
 * Executes a delete operation on the engine, notifying the indexing operation listeners
 * before and after (on both success and failure), and marking the shard active.
 */
private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException {
    active.set(true);
    final Engine.DeleteResult result;
    delete = indexingOperationListeners.preDelete(shardId, delete);
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("delete [{}] (seq no [{}])", delete.uid().text(), delete.seqNo());
        }
        result = engine.delete(delete);
    } catch (Exception e) {
        indexingOperationListeners.postDelete(shardId, delete, e);
        throw e;
    }
    indexingOperationListeners.postDelete(shardId, delete, result);
    return result;
}
/**
 * Performs a real-time GET against the engine. Returns {@code NOT_EXISTS} when the
 * index has no mapping yet (no documents can have been indexed).
 */
public Engine.GetResult get(Engine.Get get) {
    readAllowed();
    DocumentMapper mapper = mapperService.documentMapper();
    if (mapper == null) {
        return GetResult.NOT_EXISTS;
    }
    return getEngine().get(get, this::acquireSearcher);
}
/**
 * Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link AlreadyClosedException}.
 *
 * @param source the reason for the refresh, used for logging/statistics
 */
public void refresh(String source) {
    verifyNotClosed();
    if (logger.isTraceEnabled()) {
        logger.trace("refresh with source [{}]", source);
    }
    getEngine().refresh(source);
}
/**
 * Returns how many bytes we are currently moving from heap to disk,
 * or 0 when no engine is available.
 */
public long getWritingBytes() {
    final Engine engine = getEngineOrNull();
    return engine == null ? 0 : engine.getWritingBytes();
}
/** Returns refresh statistics (internal and external counts/times, plus pending listener count). */
public RefreshStats refreshStats() {
    int listeners = refreshListeners.pendingCount();
    return new RefreshStats(
        refreshMetric.count(),
        TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()),
        externalRefreshMetric.count(),
        TimeUnit.NANOSECONDS.toMillis(externalRefreshMetric.sum()),
        listeners
    );
}
/** Returns flush statistics (total and periodic flush counts, cumulative time in millis). */
public FlushStats flushStats() {
    return new FlushStats(flushMetric.count(), periodicFlushMetric.count(), TimeUnit.NANOSECONDS.toMillis(flushMetric.sum()));
}
/**
 * Returns document statistics from the engine.
 *
 * @throws IllegalIndexShardStateException if the shard is not in a readable state
 */
public DocsStats docStats() {
readAllowed();
return getEngine().docStats();
}
/**
 * @return {@link CommitStats} describing the last Lucene commit of the engine
 * @throws AlreadyClosedException if shard is closed
 */
public CommitStats commitStats() {
return getEngine().commitStats();
}
/**
 * @return {@link SeqNoStats} computed against the replication tracker's current global checkpoint
 * @throws AlreadyClosedException if shard is closed
 */
public SeqNoStats seqNoStats() {
return getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint());
}
/**
 * Returns indexing statistics for this shard, including current throttling state.
 * When no engine is running (e.g. shard closed or still recovering) the stats
 * report no throttling and zero throttle time.
 */
public IndexingStats indexingStats() {
final Engine engine = getEngineOrNull();
boolean throttled = false;
long throttleTimeInMillis = 0;
if (engine != null) {
throttled = engine.isThrottled();
throttleTimeInMillis = engine.getIndexThrottleTimeInMillis();
}
return internalIndexingStats.stats(throttled, throttleTimeInMillis);
}
/** Returns search statistics for this shard, optionally broken down by the given stats groups. */
public SearchStats searchStats(String... groups) {
return searchStats.stats(groups);
}
/** Returns statistics for get (realtime fetch) operations served by this shard. */
public GetStats getStats() {
return getService.stats();
}
/**
 * Returns store (disk usage) statistics, reserving the number of bytes still expected
 * to arrive during an ongoing recovery. An {@link IOException} while reading the store
 * fails the shard, since it likely indicates corruption.
 */
public StoreStats storeStats() {
try {
// take a local copy: recoveryState is read twice and may change concurrently
final RecoveryState recoveryState = this.recoveryState;
final long bytesStillToRecover = recoveryState == null ? -1L : recoveryState.getIndex().bytesStillToRecover();
return store.stats(bytesStillToRecover == -1 ? StoreStats.UNKNOWN_RESERVED_BYTES : bytesStillToRecover);
} catch (IOException e) {
failShard("Failing shard because of exception during storeStats", e);
throw new OpenSearchException("io exception while building 'store stats'", e);
}
}
/**
 * Returns merge statistics, including the count of unreferenced-file clean-ups performed
 * by the engine. Returns empty stats when no engine is running.
 */
public MergeStats mergeStats() {
final Engine engine = getEngineOrNull();
if (engine == null) {
return new MergeStats();
}
final MergeStats mergeStats = engine.getMergeStats();
mergeStats.addUnreferencedFileCleanUpStats(engine.unreferencedFileCleanUpsPerformed());
return mergeStats;
}
/**
 * Returns segment statistics for this shard, augmented with bitset filter cache memory,
 * remote segment store stats (when the index is remote-store backed) and replication
 * stats (when segment replication or remote-node mode is enabled).
 *
 * @param includeSegmentFileSizes   whether to include per-file size breakdowns
 * @param includeUnloadedSegments   whether to include segments not currently loaded
 */
public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) {
SegmentsStats segmentsStats = getEngine().segmentsStats(includeSegmentFileSizes, includeUnloadedSegments);
segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes());
// Populate remote_store stats only if the index is remote store backed.
// Read the indexSettings field directly, consistent with translogStats().
if (indexSettings.isAssignedOnRemoteNode()) {
segmentsStats.addRemoteSegmentStats(
new RemoteSegmentStats(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId).stats())
);
}
if (indexSettings.isSegRepEnabledOrRemoteNode()) {
segmentsStats.addReplicationStats(getReplicationStats());
}
return segmentsStats;
}
/** Returns searcher-warmer statistics for this shard. */
public WarmerStats warmerStats() {
return shardWarmerService.stats();
}
/** Returns field-data memory statistics, optionally restricted to the given fields. */
public FieldDataStats fieldDataStats(String... fields) {
return shardFieldData.stats(fields);
}
/**
 * Returns translog statistics, augmented with remote translog transfer stats when the
 * index is remote-store backed.
 */
public TranslogStats translogStats() {
TranslogStats translogStats = getEngine().translogManager().getTranslogStats();
// Populate remote_store stats only if the index is remote store backed
if (indexSettings.isAssignedOnRemoteNode()) {
translogStats.addRemoteTranslogStats(
new RemoteTranslogStats(remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardId).stats())
);
}
return translogStats;
}
/**
 * Returns completion-suggester statistics, optionally restricted to the given fields.
 *
 * @throws IllegalIndexShardStateException if the shard is not in a readable state
 */
public CompletionStats completionStats(String... fields) {
readAllowed();
return getEngine().completionStats(fields);
}
/**
 * Executes the given flush request against the engine and records the time taken
 * in the flush metric.
 *
 * @param request the flush request
 */
public void flush(FlushRequest request) {
final boolean waitIfOngoing = request.waitIfOngoing();
final boolean force = request.force();
logger.trace("flush with {}", request);
/*
 * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under
 * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer
 * since we use Engine#writeIndexingBuffer for this now.
 */
verifyNotClosed();
final long time = System.nanoTime();
getEngine().flush(force, waitIfOngoing);
flushMetric.inc(System.nanoTime() - time);
}
/**
 * checks and removes translog files that no longer need to be retained. See
 * {@link org.opensearch.index.translog.TranslogDeletionPolicy} for details
 * <p>
 * No-op for remote-store backed indices, where translog retention is managed remotely.
 */
public void trimTranslog() {
if (indexSettings.isAssignedOnRemoteNode()) {
return;
}
verifyNotClosed();
final Engine engine = getEngine();
engine.translogManager().trimUnreferencedTranslogFiles();
}
/**
 * Rolls the translog generation and cleans unneeded generations.
 */
public void rollTranslogGeneration() throws IOException {
final Engine engine = getEngine();
engine.translogManager().rollTranslogGeneration();
}
/**
 * Executes a force merge on the engine using the parameters of the given request.
 * Requires the shard to be active.
 *
 * @throws IOException if the merge fails
 */
public void forceMerge(ForceMergeRequest forceMerge) throws IOException {
verifyActive();
if (logger.isTraceEnabled()) {
logger.trace("force merge with {}", forceMerge);
}
Engine engine = getEngine();
engine.forceMerge(
forceMerge.flush(),
forceMerge.maxNumSegments(),
forceMerge.onlyExpungeDeletes(),
false,
false,
forceMerge.forceMergeUUID()
);
}
/**
 * Upgrades the shard to the current version of Lucene and returns the minimum segment version
 */
public org.apache.lucene.util.Version upgrade(UpgradeRequest upgrade) throws IOException {
verifyActive();
if (logger.isTraceEnabled()) {
logger.trace("upgrade with {}", upgrade);
}
org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion();
// we just want to upgrade the segments, not actually force merge to a single segment
final Engine engine = getEngine();
engine.forceMerge(
true, // we need to flush at the end to make sure the upgrade is durable
Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment
false,
true,
upgrade.upgradeOnlyAncientSegments(),
null
);
org.apache.lucene.util.Version version = minimumCompatibleVersion();
if (logger.isTraceEnabled()) {
logger.trace("upgraded segments for {} from version {} to version {}", shardId, previousVersion, version);
}
return version;
}
/**
 * Returns the minimum Lucene version across all of the shard's segments, falling back
 * to the Lucene version of the index-created version when there are no segments.
 */
public org.apache.lucene.util.Version minimumCompatibleVersion() {
org.apache.lucene.util.Version minVersion = null;
for (Segment segment : getEngine().segments(false)) {
final org.apache.lucene.util.Version segmentVersion = segment.getVersion();
if (minVersion == null || minVersion.onOrAfter(segmentVersion)) {
minVersion = segmentVersion;
}
}
if (minVersion == null) {
return indexSettings.getIndexVersionCreated().luceneVersion;
}
return minVersion;
}
/**
 * Creates a new {@link IndexCommit} snapshot from the currently running engine. All resources referenced by this
 * commit won't be freed until the commit / snapshot is closed.
 *
 * @param flushFirst <code>true</code> if the index should first be flushed to disk / a low level lucene commit should be executed
 * @throws IllegalIndexShardStateException if the shard is neither STARTED nor CLOSED
 */
public GatedCloseable<IndexCommit> acquireLastIndexCommit(boolean flushFirst) throws EngineException {
final IndexShardState state = this.state; // one time volatile read
// we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) {
return getEngine().acquireLastIndexCommit(flushFirst);
} else {
throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
}
}
/**
 * Acquires the last index commit and then refreshes the engine, so that readers observe
 * the snapshotted state. Used for remote-store based shard snapshots.
 */
public GatedCloseable<IndexCommit> acquireLastIndexCommitAndRefresh(boolean flushFirst) throws EngineException {
GatedCloseable<IndexCommit> indexCommit = acquireLastIndexCommit(flushFirst);
getEngine().refresh("Snapshot for Remote Store based Shard");
return indexCommit;
}
/**
 * Acquires a lock on the remote segment store for the given commit, preventing its
 * files from being deleted while a snapshot references them.
 *
 * @param snapshotId Snapshot UUID.
 * @param primaryTerm current primary term.
 * @param generation Snapshot Commit Generation.
 * @throws IOException if there is some failure in acquiring lock in remote store.
 */
public void acquireLockOnCommitData(String snapshotId, long primaryTerm, long generation) throws IOException {
RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteDirectory();
remoteSegmentStoreDirectory.acquireLock(primaryTerm, generation, snapshotId);
}
/**
 * Releases a previously acquired lock on the remote segment store for the given commit.
 *
 * @param snapshotId Snapshot UUID.
 * @param primaryTerm current primary term.
 * @param generation Snapshot Commit Generation.
 * @throws IOException if there is some failure in releasing lock in remote store.
 */
public void releaseLockOnCommitData(String snapshotId, long primaryTerm, long generation) throws IOException {
RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteDirectory();
remoteSegmentStoreDirectory.releaseLock(primaryTerm, generation, snapshotId);
}
/**
 * Returns the current engine as an {@link NRTReplicationEngine} when that is the running
 * engine type, otherwise an empty Optional.
 * <p>
 * The engine reference is read exactly once so the {@code instanceof} check and the cast
 * operate on the same instance; the engine may be swapped concurrently (e.g. during
 * relocation hand-off or an engine reset), and reading it twice could cast a different
 * engine than the one that was checked.
 */
public Optional<NRTReplicationEngine> getReplicationEngine() {
final Engine engine = getEngine();
if (engine instanceof NRTReplicationEngine) {
return Optional.of((NRTReplicationEngine) engine);
} else {
return Optional.empty();
}
}
/**
 * Updates the shard with the given {@link SegmentInfos} when an NRT replication engine
 * is running; a no-op for any other engine type.
 * <p>
 * The replication-engine Optional is fetched once so the presence check and the use
 * refer to the same engine instance (the engine can be swapped concurrently).
 *
 * @throws IOException if updating segments fails
 */
public void finalizeReplication(SegmentInfos infos) throws IOException {
final Optional<NRTReplicationEngine> replicationEngine = getReplicationEngine();
if (replicationEngine.isPresent()) {
replicationEngine.get().updateSegments(infos);
}
}
/**
 * Snapshots the most recent safe index commit from the currently running engine.
 * All index files referenced by this index commit won't be freed until the commit/snapshot is closed.
 *
 * @throws IllegalIndexShardStateException if the shard is neither STARTED nor CLOSED
 */
public GatedCloseable<IndexCommit> acquireSafeIndexCommit() throws EngineException {
final IndexShardState state = this.state; // one time volatile read
// we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) {
return getEngine().acquireSafeIndexCommit();
} else {
throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
}
}
/**
 * return the most recently computed ReplicationCheckpoint for a particular shard.
 * The checkpoint is updated inside a refresh listener and may lag behind the SegmentInfos on the reader.
 * To guarantee the checkpoint is up to date with the latest on-reader infos, use `getLatestSegmentInfosAndCheckpoint` instead.
 *
 * @return {@link ReplicationCheckpoint} - The most recently computed ReplicationCheckpoint.
 */
public ReplicationCheckpoint getLatestReplicationCheckpoint() {
return replicationTracker.getLatestReplicationCheckpoint();
}
/**
 * Compute and return the latest ReplicationCheckpoint for a shard and a GatedCloseable containing the corresponding SegmentInfos.
 * The segments referenced by the SegmentInfos will remain on disk until the GatedCloseable is closed.
 * <p>
 * Primary shards compute the seqNo used in the replication checkpoint from the fetched SegmentInfos.
 * Replica shards compute the seqNo from its latest processed checkpoint, which only increases when refreshing on new segments.
 *
 * @return A {@link Tuple} containing SegmentInfos wrapped in a {@link GatedCloseable} and the {@link ReplicationCheckpoint} computed from the infos.
 *
 */
public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegmentInfosAndCheckpoint() {
assert indexSettings.isSegRepEnabledOrRemoteNode();
// do not close the snapshot - caller will close it.
GatedCloseable<SegmentInfos> snapshot = null;
try {
snapshot = getSegmentInfosSnapshot();
final SegmentInfos segmentInfos = snapshot.get();
return new Tuple<>(snapshot, computeReplicationCheckpoint(segmentInfos));
} catch (IOException | AlreadyClosedException e) {
logger.error("Error Fetching SegmentInfos and latest checkpoint", e);
if (snapshot != null) {
try {
snapshot.close();
} catch (IOException ex) {
// wrap the close failure itself (previously the already-logged fetch failure
// 'e' was used as the cause, silently dropping 'ex'); keep 'e' attached as a
// suppressed exception so neither failure is lost
ex.addSuppressed(e);
throw new OpenSearchException("Error Closing SegmentInfos Snapshot", ex);
}
}
}
// fall back to an empty snapshot and the last computed checkpoint
return new Tuple<>(new GatedCloseable<>(null, () -> {}), getLatestReplicationCheckpoint());
}
/**
 * Compute the latest {@link ReplicationCheckpoint} from a SegmentInfos.
 * This function fetches a metadata snapshot from the store that comes with an IO cost.
 * We will reuse the existing stored checkpoint if it is at the same SI version.
 *
 * @param segmentInfos {@link SegmentInfos} infos to use to compute.
 * @return {@link ReplicationCheckpoint} Checkpoint computed from the infos.
 * @throws IOException When there is an error computing segment metadata from the store.
 */
ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) throws IOException {
if (segmentInfos == null) {
return ReplicationCheckpoint.empty(shardId);
}
final ReplicationCheckpoint latestReplicationCheckpoint = getLatestReplicationCheckpoint();
// reuse the cached checkpoint when version, generation and primary term all match,
// avoiding the costly store-metadata read below
if (latestReplicationCheckpoint.getSegmentInfosVersion() == segmentInfos.getVersion()
&& latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration()
&& latestReplicationCheckpoint.getPrimaryTerm() == getOperationPrimaryTerm()) {
return latestReplicationCheckpoint;
}
final Map<String, StoreFileMetadata> metadataMap = store.getSegmentMetadataMap(segmentInfos);
final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(
this.shardId,
getOperationPrimaryTerm(),
segmentInfos.getGeneration(),
segmentInfos.getVersion(),
metadataMap.values().stream().mapToLong(StoreFileMetadata::length).sum(),
getEngine().config().getCodec().getName(),
metadataMap
);
logger.trace("Recomputed ReplicationCheckpoint for shard {}", checkpoint);
return checkpoint;
}
/**
 * Checks if this target shard should start a round of segment replication.
 * A shard may act as a segment-replication target only when segment replication is
 * enabled, the shard is a replica (not in primary mode and not routed primary), its
 * state permits replication, and it runs an NRT replication engine.
 * @return - True if the shard is able to perform segment replication.
 */
public boolean isSegmentReplicationAllowed() {
if (indexSettings.isSegRepEnabledOrRemoteNode() == false) {
logger.trace("Attempting to perform segment replication when it is not enabled on the index");
return false;
}
if (getReplicationTracker().isPrimaryMode()) {
logger.trace("Shard is in primary mode and cannot perform segment replication as a replica.");
return false;
}
if (this.routingEntry().primary()) {
logger.trace("Shard routing is marked primary thus cannot perform segment replication as replica");
return false;
}
// allowed states: STARTED, or POST_RECOVERY while the routing entry is still INITIALIZING
if (state().equals(IndexShardState.STARTED) == false
&& (state() == IndexShardState.POST_RECOVERY && shardRouting.state() == ShardRoutingState.INITIALIZING) == false) {
logger.trace(
() -> new ParameterizedMessage(
"Shard is not started or recovering {} {} and cannot perform segment replication as a replica",
state(),
shardRouting.state()
)
);
return false;
}
if (getReplicationEngine().isEmpty()) {
logger.trace(
() -> new ParameterizedMessage(
"Shard does not have the correct engine type to perform segment replication {}.",
getEngine().getClass()
)
);
return false;
}
return true;
}
/**
 * Checks if checkpoint should be processed
 *
 * @param requestCheckpoint received checkpoint that is checked for processing
 * @return true if checkpoint should be processed
 */
public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckpoint) {
if (isSegmentReplicationAllowed() == false) {
return false;
}
final ReplicationCheckpoint localCheckpoint = getLatestReplicationCheckpoint();
// only replicate when the incoming checkpoint is strictly ahead of what we already have
if (requestCheckpoint.isAheadOf(localCheckpoint) == false) {
logger.trace(
() -> new ParameterizedMessage(
"Ignoring new replication checkpoint - Shard is already on checkpoint {} that is ahead of {}",
localCheckpoint,
requestCheckpoint
)
);
return false;
}
return true;
}
/**
 * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the index shard,
 * without having to worry about the current state of the engine and concurrent flushes.
 *
 * @throws org.apache.lucene.index.IndexNotFoundException if no index is found in the current directory
 * @throws org.apache.lucene.index.CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum
 * mismatch or an unexpected exception when opening the index reading the
 * segments file.
 * @throws org.apache.lucene.index.IndexFormatTooOldException if the lucene index is too old to be opened.
 * @throws org.apache.lucene.index.IndexFormatTooNewException if the lucene index is too new to be opened.
 * @throws java.io.FileNotFoundException if one or more files referenced by a commit are not present.
 * @throws java.nio.file.NoSuchFileException if one or more files referenced by a commit are not present.
 */
public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
assert Thread.holdsLock(mutex) == false : "snapshotting store metadata under mutex";
GatedCloseable<IndexCommit> wrappedIndexCommit = null;
// hold a store reference for the duration so the underlying files are not deleted
store.incRef();
try {
synchronized (engineMutex) {
// if the engine is not running, we can access the store directly, but we need to make sure no one starts
// the engine on us. If the engine is running, we can get a snapshot via the deletion policy of the engine.
final Engine engine = getEngineOrNull();
if (engine != null) {
wrappedIndexCommit = engine.acquireLastIndexCommit(false);
}
if (wrappedIndexCommit == null) {
return store.getMetadata(null, true);
}
}
// read metadata outside engineMutex; the acquired commit pins the files we need
return store.getMetadata(wrappedIndexCommit.get());
} finally {
store.decRef();
IOUtils.close(wrappedIndexCommit);
}
}
/**
 * Fetch a map of StoreFileMetadata for each segment from the latest SegmentInfos.
 * This is used to compute diffs for segment replication.
 *
 * @return - Map of Segment Filename to its {@link StoreFileMetadata}
 * @throws IOException - When there is an error loading metadata from the store.
 */
public Map<String, StoreFileMetadata> getSegmentMetadataMap() throws IOException {
// try-with-resources keeps the SegmentInfos snapshot pinned only while reading metadata
try (final GatedCloseable<SegmentInfos> snapshot = getSegmentInfosSnapshot()) {
return store.getSegmentMetadataMap(snapshot.get());
}
}
/**
 * Fails the shard and marks the shard store as corrupted if
 * <code>e</code> is caused by index corruption
 */
public void failShard(String reason, @Nullable Exception e) {
// fail the engine. This will cause this shard to also be removed from the node's index service.
getEngine().failEngine(reason, e);
}
/**
 * Acquires a point-in-time reader that can be used to create {@link Engine.Searcher}s on demand.
 * Uses the EXTERNAL searcher scope.
 */
public Engine.SearcherSupplier acquireSearcherSupplier() {
return acquireSearcherSupplier(Engine.SearcherScope.EXTERNAL);
}
/**
 * Acquires a point-in-time reader that can be used to create {@link Engine.Searcher}s on demand.
 *
 * @param scope whether the searcher is for internal or external (user-facing) use
 * @throws IllegalIndexShardStateException if the shard is not in a readable state
 */
public Engine.SearcherSupplier acquireSearcherSupplier(Engine.SearcherScope scope) {
readAllowed();
// record access time for idle-shard bookkeeping
markSearcherAccessed();
final Engine engine = getEngine();
return engine.acquireSearcherSupplier(this::wrapSearcher, scope);
}
/** Acquires an external-scope searcher for the given source description. */
public Engine.Searcher acquireSearcher(String source) {
return acquireSearcher(source, Engine.SearcherScope.EXTERNAL);
}
// Records the current relative time as the last searcher access; lazySet suffices since
// the value is advisory (used for idle-shard detection), not for synchronization.
private void markSearcherAccessed() {
lastSearcherAccess.lazySet(threadPool.relativeTimeInMillis());
}
/**
 * Acquires a searcher from the engine for the given source and scope, wrapping it with
 * the configured reader wrapper.
 *
 * @throws IllegalIndexShardStateException if the shard is not in a readable state
 */
private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scope) {
readAllowed();
markSearcherAccessed();
final Engine engine = getEngine();
return engine.acquireSearcher(source, scope, this::wrapSearcher);
}
/**
 * Wraps the given searcher with the configured {@code readerWrapper} (if any).
 * On any failure the original searcher is released so no reader reference leaks.
 */
private Engine.Searcher wrapSearcher(Engine.Searcher searcher) {
assert OpenSearchDirectoryReader.unwrap(searcher.getDirectoryReader()) != null
: "DirectoryReader must be an instance or OpenSearchDirectoryReader";
boolean success = false;
try {
final Engine.Searcher newSearcher = readerWrapper == null ? searcher : wrapSearcher(searcher, readerWrapper);
assert newSearcher != null;
success = true;
return newSearcher;
} catch (IOException ex) {
throw new OpenSearchException("failed to wrap searcher", ex);
} finally {
// release the original searcher only when wrapping failed
if (success == false) {
Releasables.close(success, searcher);
}
}
}
/**
 * Applies the given reader wrapper to an engine searcher, validating that the wrapper
 * preserves the reader cache key and does not hide the underlying
 * {@link OpenSearchDirectoryReader}. Returns the original searcher when the wrapper is
 * an identity transform; otherwise returns a new searcher whose close also closes the
 * wrapper chain (but never the underlying engine reader).
 */
static Engine.Searcher wrapSearcher(
Engine.Searcher engineSearcher,
CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper
) throws IOException {
assert readerWrapper != null;
final OpenSearchDirectoryReader openSearchDirectoryReader = OpenSearchDirectoryReader.getOpenSearchDirectoryReader(
engineSearcher.getDirectoryReader()
);
if (openSearchDirectoryReader == null) {
throw new IllegalStateException("Can't wrap non opensearch directory reader");
}
// shield the engine's reader from being closed by whatever the wrapper builds on top
NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(engineSearcher.getDirectoryReader());
DirectoryReader reader = readerWrapper.apply(nonClosingReaderWrapper);
if (reader != nonClosingReaderWrapper) {
if (reader.getReaderCacheHelper() != openSearchDirectoryReader.getReaderCacheHelper()) {
throw new IllegalStateException(
"wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey,"
+ " wrappers must override this method and delegate to the original readers core cache key. Wrapped readers can't be "
+ "used as cache keys since their are used only per request which would lead to subtle bugs"
);
}
if (OpenSearchDirectoryReader.getOpenSearchDirectoryReader(reader) != openSearchDirectoryReader) {
// prevent that somebody wraps with a non-filter reader
throw new IllegalStateException("wrapped directory reader hides actual OpenSearchDirectoryReader but shouldn't");
}
}
if (reader == nonClosingReaderWrapper) {
// identity wrap - hand back the original searcher untouched
return engineSearcher;
} else {
// we close the reader to make sure wrappers can release resources if needed....
// our NonClosingReaderWrapper makes sure that our reader is not closed
return new Engine.Searcher(
engineSearcher.source(),
reader,
engineSearcher.getSimilarity(),
engineSearcher.getQueryCache(),
engineSearcher.getQueryCachingPolicy(),
() -> IOUtils.close(
reader, // this will close the wrappers excluding the NonClosingReaderWrapper
engineSearcher
)
); // this will run the closeable on the wrapped engine reader
}
}
/** Starts replication-lag timers for the given checkpoint once it has been published to replicas. */
public void onCheckpointPublished(ReplicationCheckpoint checkpoint) {
replicationTracker.startReplicationLagTimers(checkpoint);
}
/**
 * Used with segment replication during relocation handoff, this method updates current read only engine to global
 * checkpoint followed by changing to writeable engine
 * <p>
 * Blocks all operation permits for up to 30 minutes while the engine is reset.
 *
 * @throws IOException if communication failed
 * @throws InterruptedException if calling thread is interrupted
 * @throws TimeoutException if timed out waiting for in-flight operations to finish
 *
 * @opensearch.internal
 */
public void resetToWriteableEngine() throws IOException, InterruptedException, TimeoutException {
indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { resetEngineToGlobalCheckpoint(); });
}
/**
 * Wrapper for a non-closing reader: delegates everything to the wrapped
 * {@link DirectoryReader} but turns {@code close()} into a no-op, so reader wrappers
 * built on top of it can be closed without closing the engine's underlying reader.
 *
 * @opensearch.internal
 */
private static final class NonClosingReaderWrapper extends FilterDirectoryReader {
private NonClosingReaderWrapper(DirectoryReader in) throws IOException {
// identity sub-reader wrapper - leaves leaves untouched
super(in, new SubReaderWrapper() {
@Override
public LeafReader wrap(LeafReader reader) {
return reader;
}
});
}
@Override
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
return new NonClosingReaderWrapper(in);
}
@Override
protected void doClose() throws IOException {
// don't close here - mimic the MultiReader#doClose = false behavior that FilterDirectoryReader doesn't have
}
@Override
public CacheHelper getReaderCacheHelper() {
// delegate so caches keyed on the underlying reader keep working
return in.getReaderCacheHelper();
}
}
/**
 * Closes the shard: transitions state to CLOSED, optionally flushes and closes the
 * engine, releases listeners and operation permits. Safe to call multiple times.
 *
 * @param reason      human-readable reason for closing (used for state change)
 * @param flushEngine whether to flush the engine before closing it
 * @param deleted     whether the shard is being deleted; triggers translog cleanup on the primary
 */
public void close(String reason, boolean flushEngine, boolean deleted) throws IOException {
synchronized (engineMutex) {
try {
synchronized (mutex) {
changeState(IndexShardState.CLOSED, reason);
}
} finally {
// detach the engine first so no new operations can reach it
final Engine engine = this.currentEngineReference.getAndSet(null);
try {
if (engine != null && flushEngine) {
engine.flushAndClose();
}
} finally {
// playing safe here and close the engine even if the above succeeds - close can be called multiple times
// Also closing refreshListeners to prevent us from accumulating any more listeners
IOUtils.close(engine, globalCheckpointListeners, refreshListeners, pendingReplicationActions);
if (deleted && engine != null && isPrimaryMode()) {
// Translog Clean up
engine.translogManager().onDelete();
}
indexShardOperationPermits.close();
}
}
}
}
/*
ToDo : Fix this https://github.com/opensearch-project/OpenSearch/issues/8003
*/
/**
 * Unwraps the remote store's directory chain down to the {@link RemoteSegmentStoreDirectory}.
 * Assumes the layout FilterDirectory -> byte-size-caching FilterDirectory -> remote directory;
 * only valid for remote-store backed shards.
 */
public RemoteSegmentStoreDirectory getRemoteDirectory() {
assert indexSettings.isAssignedOnRemoteNode();
assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory";
FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory();
FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate();
final Directory remoteDirectory = byteSizeCachingStoreDirectory.getDelegate();
return ((RemoteSegmentStoreDirectory) remoteDirectory);
}
/**
 * Returns true iff it is able to verify that remote segment store
 * is in sync with local, i.e. every local segment file (except the excluded ones)
 * has been uploaded. Returns false on any read failure; rethrows only
 * {@link AlreadyClosedException}.
 */
public boolean isRemoteSegmentStoreInSync() {
assert indexSettings.isAssignedOnRemoteNode();
try {
RemoteSegmentStoreDirectory directory = getRemoteDirectory();
if (directory.readLatestMetadataFile() != null) {
Collection<String> uploadFiles = directory.getSegmentsUploadedToRemoteStore().keySet();
try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = getSegmentInfosSnapshot()) {
Collection<String> localSegmentInfosFiles = segmentInfosGatedCloseable.get().files(true);
Set<String> localFiles = new HashSet<>(localSegmentInfosFiles);
// verifying that all files except EXCLUDE_FILES are uploaded to the remote
localFiles.removeAll(RemoteStoreRefreshListener.EXCLUDE_FILES);
if (uploadFiles.containsAll(localFiles)) {
return true;
}
logger.debug(
() -> new ParameterizedMessage(
"RemoteSegmentStoreSyncStatus localSize={} remoteSize={}",
localFiles.size(),
uploadFiles.size()
)
);
}
}
} catch (AlreadyClosedException e) {
// shard closed - propagate so callers can stop waiting
throw e;
} catch (Throwable e) {
// NOTE(review): catching Throwable swallows Errors (e.g. OOM) as a sync failure - confirm intended
logger.error("Exception while reading latest metadata", e);
}
return false;
}
/** Blocks until the remote store is synced or the internal upload timeout expires; no progress callback. */
public void waitForRemoteStoreSync() throws IOException {
waitForRemoteStoreSync(() -> {});
}
/*
Blocks the calling thread, waiting for the remote store to get synced till internal Remote Upload Timeout
Calls onProgress on seeing an increased file count on remote
Throws IOException if the remote store is not synced within the timeout
*/
public void waitForRemoteStoreSync(Runnable onProgress) throws IOException {
assert indexSettings.isAssignedOnRemoteNode();
RemoteSegmentStoreDirectory directory = getRemoteDirectory();
int segmentUploadCount = 0;
// only the primary uploads segments; nothing to wait for on replicas
if (shardRouting.primary() == false) {
return;
}
long startNanos = System.nanoTime();
while (System.nanoTime() - startNanos < getRecoverySettings().internalRemoteUploadTimeout().nanos()) {
try {
if (isRemoteSegmentStoreInSync()) {
return;
} else {
// report progress whenever the remote upload count has grown since the last poll
if (directory.getSegmentsUploadedToRemoteStore().size() > segmentUploadCount) {
onProgress.run();
logger.debug("Uploaded segment count {}", directory.getSegmentsUploadedToRemoteStore().size());
segmentUploadCount = directory.getSegmentsUploadedToRemoteStore().size();
}
try {
// BUG FIX: Thread.sleep takes milliseconds; the previous code passed
// TimeValue.timeValueSeconds(30).seconds() == 30, producing a 30 ms
// busy-poll instead of the intended 30 s back-off.
Thread.sleep(TimeValue.timeValueSeconds(30).millis());
} catch (InterruptedException ie) {
// restore the interrupt flag before propagating
Thread.currentThread().interrupt();
throw new OpenSearchException("Interrupted waiting for completion of [{}]", ie);
}
}
} catch (AlreadyClosedException e) {
// There is no point in waiting as shard is now closed .
return;
}
}
throw new IOException(
"Failed to upload to remote segment store within remote upload timeout of "
+ getRecoverySettings().internalRemoteUploadTimeout().getMinutes()
+ " minutes"
);
}
/**
 * Validates the shard is in RECOVERING state and notifies the index event listener
 * before recovery begins.
 *
 * @throws IndexShardNotRecoveringException if the shard has already been closed
 */
public void preRecovery() {
final IndexShardState currentState = this.state; // single volatile read
if (currentState == IndexShardState.CLOSED) {
throw new IndexShardNotRecoveringException(shardId, currentState);
}
assert currentState == IndexShardState.RECOVERING : "expected a recovering shard " + shardId + " but got " + currentState;
indexEventListener.beforeIndexShardRecovery(this, indexSettings);
}
/**
 * Completes recovery: refreshes the engine to expose all indexed operations and moves
 * the shard to POST_RECOVERY state.
 *
 * @throws IndexShardStartedException   if the shard is already started
 * @throws IndexShardClosedException    if the shard has been closed
 */
public void postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
synchronized (postRecoveryMutex) {
// we need to refresh again to expose all operations that were index until now. Otherwise
// we may not expose operations that were indexed with a refresh listener that was immediately
// responded to in addRefreshListener. The refresh must happen under the same mutex used in addRefreshListener
// and before moving this shard to POST_RECOVERY state (i.e., allow to read from this shard).
getEngine().refresh("post_recovery");
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId);
}
if (state == IndexShardState.STARTED) {
throw new IndexShardStartedException(shardId);
}
recoveryState.setStage(RecoveryState.Stage.DONE);
changeState(IndexShardState.POST_RECOVERY, reason);
}
}
}
/**
 * called before starting to copy index files over
 *
 * @throws IndexShardNotRecoveringException if the shard is not in RECOVERING state
 */
public void prepareForIndexRecovery() {
if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state);
}
recoveryState.setStage(RecoveryState.Stage.INDEX);
// file-based recovery must start with no engine running
assert currentEngineReference.get() == null;
}
/**
 * A best effort to bring up this shard to the global checkpoint using the local translog before performing a peer recovery.
 *
 * @return a sequence number that an operation-based peer recovery can start with.
 * This is the first operation after the local checkpoint of the safe commit if exists.
 * Returns {@code UNASSIGNED_SEQ_NO} whenever local recovery cannot be performed, which
 * tells the peer recovery to fall back to a full (file-based) recovery.
 */
private long recoverLocallyUpToGlobalCheckpoint() {
validateLocalRecoveryState();
final Optional<SequenceNumbers.CommitInfo> safeCommit;
final long globalCheckpoint;
try {
// locate the safe commit relative to the persisted global checkpoint
final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(TRANSLOG_UUID_KEY);
globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
safeCommit = store.findSafeIndexCommit(globalCheckpoint);
} catch (org.apache.lucene.index.IndexNotFoundException e) {
logger.trace("skip local recovery as no index commit found");
return UNASSIGNED_SEQ_NO;
} catch (Exception e) {
logger.debug("skip local recovery as failed to find the safe commit", e);
return UNASSIGNED_SEQ_NO;
}
try {
maybeCheckIndex(); // check index here and won't do it again if ops-based recovery occurs
recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
if (safeCommit.isPresent() == false) {
logger.trace("skip local recovery as no safe commit found");
return UNASSIGNED_SEQ_NO;
}
assert safeCommit.get().localCheckpoint <= globalCheckpoint : safeCommit.get().localCheckpoint + " > " + globalCheckpoint;
if (safeCommit.get().localCheckpoint == globalCheckpoint) {
// nothing to replay - the safe commit already covers everything up to the global checkpoint
logger.trace(
"skip local recovery as the safe commit is up to date; safe commit {} global checkpoint {}",
safeCommit.get(),
globalCheckpoint
);
recoveryState.getTranslog().totalLocal(0);
return globalCheckpoint + 1;
}
if (indexSettings.getIndexMetadata().getState() == IndexMetadata.State.CLOSE
|| IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexSettings.getSettings())) {
logger.trace(
"skip local recovery as the index was closed or not allowed to write; safe commit {} global checkpoint {}",
safeCommit.get(),
globalCheckpoint
);
recoveryState.getTranslog().totalLocal(0);
return safeCommit.get().localCheckpoint + 1;
}
try {
// replay the local translog into a temporary engine, tracking recovered op counts
final TranslogRecoveryRunner translogRecoveryRunner = (snapshot) -> {
recoveryState.getTranslog().totalLocal(snapshot.totalOperations());
final int recoveredOps = runTranslogRecovery(
getEngine(),
snapshot,
Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY,
recoveryState.getTranslog()::incrementRecoveredOperations
);
recoveryState.getTranslog().totalLocal(recoveredOps); // adjust the total local to reflect the actual count
return recoveredOps;
};
innerOpenEngineAndTranslog(() -> globalCheckpoint);
getEngine().translogManager()
.recoverFromTranslog(translogRecoveryRunner, getEngine().getProcessedLocalCheckpoint(), globalCheckpoint);
logger.trace("shard locally recovered up to {}", getEngine().getSeqNoStats(globalCheckpoint));
} finally {
// always tear the temporary engine down again; peer recovery opens its own engine
synchronized (engineMutex) {
IOUtils.close(currentEngineReference.getAndSet(null));
}
}
} catch (Exception e) {
logger.debug(new ParameterizedMessage("failed to recover shard locally up to global checkpoint {}", globalCheckpoint), e);
return UNASSIGNED_SEQ_NO;
}
try {
// we need to find the safe commit again as we should have created a new one during the local recovery
final Optional<SequenceNumbers.CommitInfo> newSafeCommit = store.findSafeIndexCommit(globalCheckpoint);
assert newSafeCommit.isPresent() : "no safe commit found after local recovery";
return newSafeCommit.get().localCheckpoint + 1;
} catch (Exception e) {
logger.debug(
new ParameterizedMessage(
"failed to find the safe commit after recovering shard locally up to global checkpoint {}",
globalCheckpoint
),
e
);
return UNASSIGNED_SEQ_NO;
}
}
/**
 * Determines the sequence number from which an operation-based peer recovery should start.
 *
 * @param localTranslog whether a local translog is available to replay up to the global checkpoint
 * @return the starting sequence number, or {@code UNASSIGNED_SEQ_NO} when local recovery is not possible
 */
public long recoverLocallyAndFetchStartSeqNo(boolean localTranslog) {
return localTranslog ? recoverLocallyUpToGlobalCheckpoint() : recoverLocallyUptoLastCommit();
}
/**
 * The method figures out the sequence number basis the last commit.
 * Only used for remote-translog backed shards, where the remote store is the source of
 * truth beyond the last commit.
 *
 * @return the starting sequence number from which the recovery should start,
 *         or {@code UNASSIGNED_SEQ_NO} when it cannot be determined.
 */
private long recoverLocallyUptoLastCommit() {
assert indexSettings.isAssignedOnRemoteNode() : "Remote translog store is not enabled";
long seqNo;
validateLocalRecoveryState();
try {
// the max seq no recorded in the last commit's user data is the replay starting point
seqNo = Long.parseLong(store.readLastCommittedSegmentsInfo().getUserData().get(MAX_SEQ_NO));
} catch (org.apache.lucene.index.IndexNotFoundException e) {
logger.error("skip local recovery as no index commit found");
return UNASSIGNED_SEQ_NO;
} catch (Exception e) {
logger.error("skip local recovery as failed to find the safe commit", e);
return UNASSIGNED_SEQ_NO;
}
try {
maybeCheckIndex();
recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
// no local translog ops will be replayed
recoveryState.getTranslog().totalLocal(0);
} catch (Exception e) {
logger.error("check index failed during fetch seqNo", e);
return UNASSIGNED_SEQ_NO;
}
return seqNo;
}
/**
 * Preconditions for a local (on-disk) recovery attempt: must not hold the shard mutex, the shard
 * must be in the RECOVERING state and the INDEX recovery stage, and (assertion-only) this must be
 * a peer-recovery target.
 *
 * @throws IndexShardNotRecoveringException if the shard is not currently recovering
 */
private void validateLocalRecoveryState() {
    assert Thread.holdsLock(mutex) == false : "recover locally under mutex";
    if (state != IndexShardState.RECOVERING) {
        throw new IndexShardNotRecoveringException(shardId, state);
    }
    recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX);
    assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]";
}
/**
 * Trims translog operations above {@code aboveSeqNo} that were written under an older primary term.
 *
 * @param aboveSeqNo operations with a sequence number greater than this value are trimmed
 */
public void trimOperationOfPreviousPrimaryTerms(long aboveSeqNo) {
    final long currentTerm = getOperationPrimaryTerm();
    getEngine().translogManager().trimOperationsFromTranslog(currentTerm, aboveSeqNo);
}
/**
 * The largest auto_id_timestamp this shard has seen, from append-only requests it processed itself
 * or from the value propagated by the primary via {@link #updateMaxUnsafeAutoIdTimestamp(long)} at
 * the start of a peer-recovery or a primary-replica resync.
 *
 * @see #updateMaxUnsafeAutoIdTimestamp(long)
 */
public long getMaxSeenAutoIdTimestamp() {
    final Engine engine = getEngine();
    return engine.getMaxSeenAutoIdTimestamp();
}
/**
 * Forces this shard's max unsafe auto_id_timestamp up to the value supplied by the primary.
 * <p>
 * Operations in soft-deletes carry no max_auto_id_timestamp, so the primary pushes its own value
 * (see {@link #getMaxSeenAutoIdTimestamp()}) at the start of peer-recovery or a primary-replica
 * resync. This disables the append-only optimization for requests replicated via replication while
 * their retry variants arrive via recovery without a timestamp; without it a replica could index
 * duplicate documents for the same id (retry append-only via recovery first, then the original
 * append-only via replication).
 *
 * @param maxSeenAutoIdTimestampFromPrimary the primary's max seen auto_id_timestamp
 */
public void updateMaxUnsafeAutoIdTimestamp(long maxSeenAutoIdTimestampFromPrimary) {
    final Engine engine = getEngine();
    engine.updateMaxUnsafeAutoIdTimestamp(maxSeenAutoIdTimestampFromPrimary);
}
/**
 * Applies a single translog operation to the current engine.
 *
 * @param operation the translog operation to replay
 * @param origin    the origin under which the operation is applied
 * @return the engine result of applying the operation
 * @throws IOException if applying the operation fails
 */
public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin) throws IOException {
    final Engine engine = getEngine();
    return applyTranslogOperation(engine, operation, origin);
}
/**
 * Replays one translog operation (INDEX, DELETE, or NO_OP) against the given engine.
 *
 * @param engine    the engine to apply the operation to
 * @param operation the translog operation
 * @param origin    the origin of the replay (primary, replica, local/peer recovery, ...)
 * @return the engine result for the applied operation
 * @throws IOException           if applying the operation fails with an I/O error
 * @throws IllegalStateException if the operation type is unknown
 */
private Engine.Result applyTranslogOperation(Engine engine, Translog.Operation operation, Engine.Operation.Origin origin)
    throws IOException {
    // If a translog op is replayed on the primary (eg. ccr), we need to use external instead of null for its version type.
    final VersionType versionType = (origin == Engine.Operation.Origin.PRIMARY) ? VersionType.EXTERNAL : null;
    final Engine.Result result;
    switch (operation.opType()) {
        case INDEX:
            final Translog.Index index = (Translog.Index) operation;
            // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all
            // autoGeneratedID docs that are coming from the primary are updated correctly.
            result = applyIndexOperation(
                engine,
                index.seqNo(),
                index.primaryTerm(),
                index.version(),
                versionType,
                UNASSIGNED_SEQ_NO, // no compare-and-set on seq_no during replay
                0,
                index.getAutoGeneratedIdTimestamp(),
                true, // isRetry: always de-optimize (see comment above)
                origin,
                new SourceToParse(
                    shardId.getIndexName(),
                    index.id(),
                    index.source(),
                    MediaTypeRegistry.xContentType(index.source()),
                    index.routing()
                ),
                index.id()
            );
            break;
        case DELETE:
            final Translog.Delete delete = (Translog.Delete) operation;
            result = applyDeleteOperation(
                engine,
                delete.seqNo(),
                delete.primaryTerm(),
                delete.version(),
                delete.id(),
                versionType,
                UNASSIGNED_SEQ_NO, // no compare-and-set on seq_no during replay
                0,
                origin
            );
            break;
        case NO_OP:
            // No-ops only advance the local checkpoint; no document is touched.
            final Translog.NoOp noOp = (Translog.NoOp) operation;
            result = markSeqNoAsNoop(engine, noOp.seqNo(), noOp.primaryTerm(), noOp.reason(), origin);
            break;
        default:
            throw new IllegalStateException("No operation defined for [" + operation + "]");
    }
    return result;
}
/**
 * Replays translog operations from the provided translog {@code snapshot} to the current engine using the given {@code origin}.
 * The callback {@code onOperationRecovered} is notified after each translog operation is replayed successfully.
 *
 * @param engine              the engine to replay into
 * @param snapshot            the translog snapshot supplying operations
 * @param origin              the origin under which the operations are applied
 * @param onOperationRecovered invoked once per successfully replayed operation (e.g. stats counters)
 * @return the number of operations successfully recovered
 * @throws IOException if reading from the snapshot or applying an operation fails
 */
int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot, Engine.Operation.Origin origin, Runnable onOperationRecovered)
    throws IOException {
    int opsRecovered = 0;
    Translog.Operation operation;
    while ((operation = snapshot.next()) != null) {
        try {
            logger.trace("[translog] recover op {}", operation);
            Engine.Result result = applyTranslogOperation(engine, operation, origin);
            switch (result.getResultType()) {
                case FAILURE:
                    throw result.getFailure();
                case MAPPING_UPDATE_REQUIRED:
                    // Mappings must already be complete before local replay; a required update is a bug.
                    throw new IllegalArgumentException("unexpected mapping update: " + result.getRequiredMappingUpdate());
                case SUCCESS:
                    break;
                default:
                    throw new AssertionError("Unknown result type [" + result.getResultType() + "]");
            }
            opsRecovered++;
            onOperationRecovered.run();
        } catch (Exception e) {
            // TODO: Don't enable this leniency unless users explicitly opt-in
            if (origin == Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY && ExceptionsHelper.status(e) == RestStatus.BAD_REQUEST) {
                // mainly for MapperParsingException and Failure to detect xcontent
                logger.info("ignoring recovery of a corrupt translog entry", e);
            } else {
                throw ExceptionsHelper.convertToRuntime(e);
            }
        }
    }
    return opsRecovered;
}
/**
 * Reads the persisted global checkpoint from the translog checkpoint file and pushes it into the
 * replication tracker.
 *
 * @throws IOException if the last commit or the translog checkpoint cannot be read
 */
private void loadGlobalCheckpointToReplicationTracker() throws IOException {
    // This must happen before an engine is opened and the translog replayed: acquiring a translog
    // snapshot causes a sync which pulls in the global checkpoint, and an engine can be forced to
    // close in its constructor, which also pulls in the global checkpoint.
    final Map<String, String> commitUserData = store.readLastCommittedSegmentsInfo().getUserData();
    final String translogUUID = commitUserData.get(TRANSLOG_UUID_KEY);
    final long persistedGlobalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
    replicationTracker.updateGlobalCheckpointOnReplica(persistedGlobalCheckpoint, "read from translog checkpoint");
}
/**
 * opens the engine on top of the existing lucene engine and translog.
 * Operations from the translog will be replayed to bring lucene up to date.
 *
 * @throws IOException if opening the engine or replaying the translog fails
 **/
public void openEngineAndRecoverFromTranslog() throws IOException {
    recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX);
    maybeCheckIndex();
    recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
    final RecoveryState.Translog translogRecoveryStats = recoveryState.getTranslog();
    // Runner wires snapshot totals and per-op progress into the recovery stats before replaying.
    final TranslogRecoveryRunner translogRecoveryRunner = (snapshot) -> {
        translogRecoveryStats.totalOperations(snapshot.totalOperations());
        translogRecoveryStats.totalOperationsOnStart(snapshot.totalOperations());
        return runTranslogRecovery(
            getEngine(),
            snapshot,
            Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY,
            translogRecoveryStats::incrementRecoveredOperations
        );
    };
    // Do not load the global checkpoint if this is a remote snapshot index
    if (indexSettings.isRemoteSnapshot() == false && indexSettings.isRemoteTranslogStoreEnabled() == false) {
        loadGlobalCheckpointToReplicationTracker();
    }
    innerOpenEngineAndTranslog(replicationTracker);
    // Replay everything available (no upper seq_no bound).
    getEngine().translogManager()
        .recoverFromTranslog(translogRecoveryRunner, getEngine().getProcessedLocalCheckpoint(), Long.MAX_VALUE);
}
/**
 * Opens the engine on top of the existing lucene engine and translog.
 * The translog is kept but its operations won't be replayed.
 * <p>
 * Used by peer recovery, where operations are received from the source shard instead of being
 * replayed from the local translog.
 *
 * @throws IOException if opening the engine fails
 */
public void openEngineAndSkipTranslogRecovery() throws IOException {
    assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]";
    recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG);
    loadGlobalCheckpointToReplicationTracker();
    innerOpenEngineAndTranslog(replicationTracker);
    getEngine().translogManager().skipTranslogRecovery();
}
/**
 * Opens the engine for a snapshot-based recovery without replaying translog operations and without
 * syncing from the remote store (the snapshot content is authoritative).
 *
 * @throws IOException if opening the engine fails
 */
public void openEngineAndSkipTranslogRecoveryFromSnapshot() throws IOException {
    assert routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT : "not a snapshot recovery ["
        + routingEntry()
        + "]";
    recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX);
    maybeCheckIndex();
    recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
    recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG);
    loadGlobalCheckpointToReplicationTracker();
    // syncFromRemote=false: the snapshot provides the segments; do not pull from the remote store.
    innerOpenEngineAndTranslog(replicationTracker, false);
    getEngine().translogManager().skipTranslogRecovery();
}
// Convenience overload: open the engine with remote sync enabled (the common path).
private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) throws IOException {
    innerOpenEngineAndTranslog(globalCheckpointSupplier, true);
}
/**
 * Creates and installs a new read-write engine for this shard, preparing the local store and
 * translog first (including remote segment/translog sync for remote-store backed indices).
 *
 * @param globalCheckpointSupplier supplies the global checkpoint for the new engine config
 * @param syncFromRemote           whether to sync segments/translog from the remote store before
 *                                 opening (forced off for remote snapshots)
 * @throws IOException if store preparation or engine creation fails
 */
private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, boolean syncFromRemote) throws IOException {
    syncFromRemote = syncFromRemote && indexSettings.isRemoteSnapshot() == false;
    assert Thread.holdsLock(mutex) == false : "opening engine under mutex";
    if (state != IndexShardState.RECOVERING) {
        throw new IndexShardNotRecoveringException(shardId, state);
    }
    final EngineConfig config = newEngineConfig(globalCheckpointSupplier);
    // we disable deletes since we allow for operations to be executed against the shard while recovering
    // but we need to make sure we don't lose deletes until we are done recovering
    config.setEnableGcDeletes(false);
    updateRetentionLeasesOnReplica(loadRetentionLeases());
    assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false || getRetentionLeases().leases().isEmpty()
        : "expected empty set of retention leases with recovery source ["
            + recoveryState.getRecoverySource()
            + "] but got "
            + getRetentionLeases();
    synchronized (engineMutex) {
        assert currentEngineReference.get() == null : "engine is running";
        verifyNotClosed();
        if (indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded()) {
            // Download missing segments from remote segment store.
            if (syncFromRemote) {
                syncSegmentsFromRemoteSegmentStore(false);
            }
            if (shardRouting.primary()) {
                if (syncFromRemote) {
                    syncRemoteTranslogAndUpdateGlobalCheckpoint();
                } else {
                    // we will enter this block when we do not want to recover from remote translog.
                    // currently only during snapshot restore, we are coming into this block.
                    // here, as while initializing remote translog we cannot skip downloading translog files,
                    // so before that step, we are deleting the translog files present in remote store.
                    deleteTranslogFilesFromRemoteTranslog();
                }
            } else if (syncFromRemote) {
                // For replicas, when we download segments from remote segment store, we need to make sure that local
                // translog is having the same UUID that is referred by the segments. If they are different, engine open
                // fails with TranslogCorruptedException. It is safe to create empty translog for remote store enabled
                // indices as replica would only need to read translog in failover scenario and we always fetch data
                // from remote translog at the time of failover.
                final SegmentInfos lastCommittedSegmentInfos = store().readLastCommittedSegmentsInfo();
                final String translogUUID = lastCommittedSegmentInfos.userData.get(TRANSLOG_UUID_KEY);
                final long checkpoint = Long.parseLong(lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
                Translog.createEmptyTranslog(
                    shardPath().resolveTranslog(),
                    shardId(),
                    checkpoint,
                    getPendingPrimaryTerm(),
                    translogUUID,
                    FileChannel::open
                );
            }
        }
        // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata).
        final Engine newEngine = engineFactory.newReadWriteEngine(config);
        onNewEngine(newEngine);
        currentEngineReference.set(newEngine);
        if (indexSettings.isSegRepEnabledOrRemoteNode()) {
            // set initial replication checkpoints into tracker.
            updateReplicationCheckpoint();
        }
        // We set active because we are now writing operations to the engine; this way,
        // we can flush if we go idle after some time and become inactive.
        active.set(true);
    }
    // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during
    // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine.
    onSettingsChanged();
    assert assertSequenceNumbersInCommit();
    recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG);
}
/**
 * Asserts that the latest commit point carries the sequence-number related user-data entries
 * (local checkpoint, max seq no, history uuid, and the max-unsafe-auto-id-timestamp marker) and
 * that the committed history uuid matches the engine's.
 * <p>
 * Fixes the assertion-message grammar ("doesn't contains" -> "doesn't contain").
 *
 * @return always {@code true} so this can be invoked from an {@code assert} statement
 * @throws IOException if the commit user data cannot be read
 */
private boolean assertSequenceNumbersInCommit() throws IOException {
    final Map<String, String> userData = fetchUserData();
    assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contain a local checkpoint";
    assert userData.containsKey(MAX_SEQ_NO) : "commit point doesn't contain a maximum sequence number";
    assert userData.containsKey(Engine.HISTORY_UUID_KEY) : "commit point doesn't contain a history uuid";
    assert userData.get(Engine.HISTORY_UUID_KEY).equals(getHistoryUUID()) : "commit point history uuid ["
        + userData.get(Engine.HISTORY_UUID_KEY)
        + "] is different than engine ["
        + getHistoryUUID()
        + "]";
    // Indices created on 5.5.0+ always write this marker; its absence indicates a corrupt/foreign commit.
    assert userData.containsKey(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) : "opening index which was created post 5.5.0 but "
        + Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID
        + " is not found in commit";
    return true;
}
/**
 * Reads the user data of the latest commit, honoring the extended-compatibility Lucene version
 * when this index is a remote snapshot with such a version configured.
 *
 * @return the commit's user-data map
 * @throws IOException if the commit cannot be read
 */
private Map<String, String> fetchUserData() throws IOException {
    final boolean useExtendedCompatibility = indexSettings.isRemoteSnapshot()
        && indexSettings.getExtendedCompatibilitySnapshotVersion() != null;
    if (useExtendedCompatibility) {
        return Lucene.readSegmentInfos(store.directory(), indexSettings.getExtendedCompatibilitySnapshotVersion()).getUserData();
    }
    return SegmentInfos.readLatestCommit(store.directory()).getUserData();
}
/**
 * Hook invoked (under {@code engineMutex}) whenever a new engine is installed: points the refresh
 * listeners at the new engine's translog write location.
 */
private void onNewEngine(Engine newEngine) {
    assert Thread.holdsLock(engineMutex);
    refreshListeners.setCurrentRefreshLocationSupplier(newEngine.translogManager()::getTranslogLastWriteLocation);
}
/**
 * called if recovery has to be restarted after network error / delay **
 * Closes the current engine (if any) and resets the recovery stage back to INIT, all under the
 * engine mutex so no new engine can be installed concurrently.
 *
 * @throws IOException if closing the current engine fails
 */
public void performRecoveryRestart() throws IOException {
    assert Thread.holdsLock(mutex) == false : "restart recovery under mutex";
    synchronized (engineMutex) {
        assert refreshListeners.pendingCount() == 0 : "we can't restart with pending listeners";
        IOUtils.close(currentEngineReference.getAndSet(null));
        resetRecoveryStage();
    }
}
/**
 * If a file-based recovery occurs, a recovery target calls this method to reset the recovery stage.
 *
 * @throws IndexShardNotRecoveringException if the shard is not in the RECOVERING state
 */
public void resetRecoveryStage() {
    assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]";
    assert currentEngineReference.get() == null;
    if (state != IndexShardState.RECOVERING) {
        throw new IndexShardNotRecoveringException(shardId, state);
    }
    recoveryState().setStage(RecoveryState.Stage.INIT);
}
/**
 * returns stats about ongoing recoveries, both source and target
 */
public RecoveryStats recoveryStats() {
    return recoveryStats;
}
/**
 * Returns the current {@link RecoveryState} if this shard is recovering or has been recovering.
 * Returns null if the recovery has not yet started or shard was not recovered (created via an API).
 */
@Override
public RecoveryState recoveryState() {
    return this.recoveryState;
}
/**
 * perform the last stages of recovery once all translog operations are done.
 * note that you should still call {@link #postRecovery(String)}.
 */
public void finalizeRecovery() {
    recoveryState().setStage(RecoveryState.Stage.FINALIZE);
    final Engine currentEngine = getEngine();
    // Make everything recovered so far searchable, then re-enable GC of deletes, which was
    // disabled while the shard was recovering.
    currentEngine.refresh("recovery_finalization");
    currentEngine.config().setEnableGcDeletes(true);
}
/**
 * Returns {@code true} if this shard can ignore a recovery attempt made to it (since the already doing/done it)
 */
public boolean ignoreRecoveryAttempt() {
    final IndexShardState currentState = state(); // one time volatile read
    switch (currentState) {
        case POST_RECOVERY:
        case RECOVERING:
        case STARTED:
        case CLOSED:
            // Already recovering, recovered, or beyond the point where recovery makes sense.
            return true;
        default:
            return false;
    }
}
/**
 * Verifies that the shard's current state permits read operations.
 *
 * @throws IllegalIndexShardStateException if reads are not allowed in the current state
 */
public void readAllowed() throws IllegalIndexShardStateException {
    final IndexShardState currentState = this.state; // one time volatile read
    if (readAllowedStates.contains(currentState)) {
        return;
    }
    throw new IllegalIndexShardStateException(
        shardId,
        currentState,
        "operations only allowed when shard state is one of " + readAllowedStates.toString()
    );
}
/** returns true if the {@link IndexShardState} allows reading */
public boolean isReadAllowed() {
    return readAllowedStates.contains(state);
}
/**
 * Verifies that a write with the given origin is permitted in the shard's current state.
 * Recovery-origin writes are only allowed while RECOVERING; all other origins additionally require
 * the state to be one of {@code writeAllowedStates} and (assertion-only) a mode consistent with the
 * origin (primary mode for PRIMARY, replication target for REPLICA, blocked operations for
 * LOCAL_RESET).
 *
 * @throws IllegalIndexShardStateException if the write is not allowed
 */
private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException {
    IndexShardState state = this.state; // one time volatile read
    if (origin.isRecovery()) {
        if (state != IndexShardState.RECOVERING) {
            throw new IllegalIndexShardStateException(
                shardId,
                state,
                "operation only allowed when recovering, origin [" + origin + "]"
            );
        }
    } else {
        if (origin == Engine.Operation.Origin.PRIMARY) {
            assert assertPrimaryMode();
        } else if (origin == Engine.Operation.Origin.REPLICA) {
            assert assertReplicationTarget();
        } else {
            // Local reset must have all other operations blocked before rewinding the engine.
            assert origin == Engine.Operation.Origin.LOCAL_RESET;
            assert getActiveOperationsCount() == OPERATIONS_BLOCKED
                : "locally resetting without blocking operations, active operations are [" + getActiveOperations() + "]";
        }
        if (writeAllowedStates.contains(state) == false) {
            throw new IllegalIndexShardStateException(
                shardId,
                state,
                "operation only allowed when shard state is one of " + writeAllowedStates + ", origin [" + origin + "]"
            );
        }
    }
}
/**
 * Assertion helper: the shard must be routed as primary AND the replication tracker must be in
 * primary mode. Always returns {@code true} so it can live inside an {@code assert}.
 */
private boolean assertPrimaryMode() {
    assert shardRouting.primary() && replicationTracker.isPrimaryMode() : "shard "
        + shardRouting
        + " is not a primary shard in primary mode";
    return true;
}
// Returns true if shard routing is primary & replication tracker is in primary mode.
public boolean isPrimaryMode() {
    final boolean routedAsPrimary = shardRouting.primary();
    return routedAsPrimary && replicationTracker.isPrimaryMode();
}
/**
 * Assertion helper: a shard in primary mode must never act as a replication target.
 * Always returns {@code true} so it can live inside an {@code assert}.
 */
private boolean assertReplicationTarget() {
    assert replicationTracker.isPrimaryMode() == false : "shard " + shardRouting + " in primary mode cannot be a replication target";
    return true;
}
// Convenience overload with no suppressed exception.
private void verifyNotClosed() throws IllegalIndexShardStateException {
    verifyNotClosed(null);
}
/**
 * Throws if the shard is closed, attaching {@code suppressed} (if non-null) to the thrown exception.
 *
 * @param suppressed an optional exception to attach as suppressed
 * @throws IllegalIndexShardStateException if the shard is closed
 */
private void verifyNotClosed(Exception suppressed) throws IllegalIndexShardStateException {
    final IndexShardState currentState = this.state; // one time volatile read
    if (currentState != IndexShardState.CLOSED) {
        return;
    }
    final IllegalIndexShardStateException exc = new IndexShardClosedException(shardId, "operation only allowed when not closed");
    if (suppressed != null) {
        exc.addSuppressed(suppressed);
    }
    throw exc;
}
/**
 * Throws unless the shard is in the STARTED state.
 *
 * @throws IllegalIndexShardStateException if the shard is not active
 */
protected final void verifyActive() throws IllegalIndexShardStateException {
    final IndexShardState currentState = this.state; // one time volatile read
    if (currentState == IndexShardState.STARTED) {
        return;
    }
    throw new IllegalIndexShardStateException(shardId, currentState, "operation only allowed when shard is active");
}
/**
 * Returns number of heap bytes used by the indexing buffer for this shard, or 0 if the shard is closed
 */
public long getIndexBufferRAMBytesUsed() {
    final Engine engine = getEngineOrNull();
    if (engine != null) {
        try {
            return engine.getIndexBufferRAMBytesUsed();
        } catch (AlreadyClosedException ex) {
            // engine closed concurrently; fall through to the closed-shard answer
        }
    }
    return 0;
}
/**
 * Registers a callback to be notified when this shard fails.
 *
 * @param onShardFailure consumer invoked with the {@link ShardFailure} on failure
 */
public void addShardFailureCallback(Consumer<ShardFailure> onShardFailure) {
    this.shardEventListener.delegates.add(onShardFailure);
}
/**
 * Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last
 * indexing operation, so we can flush the index.
 *
 * @param inactiveTimeNS idle threshold in nanoseconds since the last engine write
 */
public void flushOnIdle(long inactiveTimeNS) {
    Engine engineOrNull = getEngineOrNull();
    if (engineOrNull != null && System.nanoTime() - engineOrNull.getLastWriteNanos() >= inactiveTimeNS) {
        // getAndSet(false) guarantees only one caller wins the transition active -> inactive,
        // so at most one idle flush is scheduled per active period.
        boolean wasActive = active.getAndSet(false);
        if (wasActive) {
            logger.debug("flushing shard on inactive");
            threadPool.executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() {
                @Override
                public void onFailure(Exception e) {
                    // A failure after close is expected; only warn while the shard is alive.
                    if (state != IndexShardState.CLOSED) {
                        logger.warn("failed to flush shard on inactive", e);
                    }
                }
                @Override
                protected void doRun() {
                    // Best-effort flush: do not wait for an in-flight flush and do not force one.
                    flush(new FlushRequest().waitIfOngoing(false).force(false));
                    periodicFlushMetric.inc();
                }
            });
        }
    }
}
/** Returns {@code true} if the shard has seen indexing activity since the last idle flush. */
public boolean isActive() {
    return active.get();
}
/** Returns the on-disk path layout of this shard. */
public ShardPath shardPath() {
    return path;
}
/**
 * Recovers this (primary) shard from a set of local source shards, e.g. during shrink/split/clone.
 * Snapshots of the source shards are closed via the listener wrapper on completion, or eagerly if
 * scheduling the recovery itself fails.
 *
 * @param mappingUpdateConsumer consumer applied to the source shards' mapping metadata
 * @param localShards           the shards to recover from
 * @param listener              resolved with {@code true} when recovery succeeds
 * @throws IOException if snapshotting a source shard fails
 */
public void recoverFromLocalShards(
    Consumer<MappingMetadata> mappingUpdateConsumer,
    List<IndexShard> localShards,
    ActionListener<Boolean> listener
) throws IOException {
    assert shardRouting.primary() : "recover from local shards only makes sense if the shard is a primary shard";
    assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: "
        + recoveryState.getRecoverySource();
    final List<LocalShardSnapshot> snapshots = new ArrayList<>();
    // Ensure the snapshots are closed before the caller's listener observes the outcome.
    final ActionListener<Boolean> recoveryListener = ActionListener.runBefore(listener, () -> IOUtils.close(snapshots));
    boolean success = false;
    try {
        for (IndexShard shard : localShards) {
            snapshots.add(new LocalShardSnapshot(shard));
        }
        // we are the first primary, recover from the gateway
        // if its post api allocation, the index should exists
        assert shardRouting.primary() : "recover from local shards only makes sense if the shard is a primary shard";
        StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
        storeRecovery.recoverFromLocalShards(mappingUpdateConsumer, this, snapshots, recoveryListener);
        success = true;
    } finally {
        // If we never handed the snapshots to the recovery, close them here to avoid leaking them.
        if (success == false) {
            IOUtils.close(snapshots);
        }
    }
}
/**
 * Recovers this primary shard from its local store (gateway recovery).
 *
 * @param listener resolved with {@code true} when recovery succeeds
 */
public void recoverFromStore(ActionListener<Boolean> listener) {
    // we are the first primary, recover from the gateway
    // if its post api allocation, the index should exists
    assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
    assert shardRouting.initializing() : "can only start recovery on initializing shard";
    final StoreRecovery recovery = new StoreRecovery(shardId, logger);
    recovery.recoverFromStore(this, listener);
}
/**
 * Restores this primary shard from the remote store.
 *
 * @param listener resolved with {@code true} when the restore succeeds
 */
public void restoreFromRemoteStore(ActionListener<Boolean> listener) {
    assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
    final StoreRecovery recovery = new StoreRecovery(shardId, logger);
    recovery.recoverFromRemoteStore(this, listener);
}
/**
 * Restores this primary shard from a snapshot, then wires it to the remote store.
 * Any synchronous failure is routed to the listener rather than thrown.
 *
 * @param repository          the snapshot repository to restore from
 * @param repositoriesService service used to resolve repositories during the restore
 * @param listener            resolved with {@code true} when the restore succeeds
 */
public void restoreFromSnapshotAndRemoteStore(
    Repository repository,
    RepositoriesService repositoriesService,
    ActionListener<Boolean> listener
) {
    try {
        assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
        assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: "
            + recoveryState.getRecoverySource();
        final StoreRecovery recovery = new StoreRecovery(shardId, logger);
        recovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener, threadPool);
    } catch (Exception e) {
        listener.onFailure(e);
    }
}
/**
 * Restores this primary shard from a snapshot repository.
 * Any synchronous failure is routed to the listener rather than thrown.
 *
 * @param repository the snapshot repository to restore from
 * @param listener   resolved with {@code true} when the restore succeeds
 */
public void restoreFromRepository(Repository repository, ActionListener<Boolean> listener) {
    try {
        assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
        assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: "
            + recoveryState.getRecoverySource();
        final StoreRecovery recovery = new StoreRecovery(shardId, logger);
        recovery.recoverFromRepository(this, repository, listener);
    } catch (Exception e) {
        listener.onFailure(e);
    }
}
/**
 * Tests whether or not the engine should be flushed periodically.
 * This test is based on the current size of the translog compared to the configured flush threshold size.
 *
 * @return {@code true} if the engine should be flushed
 */
public boolean shouldPeriodicallyFlush() {
    final Engine engine = getEngineOrNull();
    if (engine == null) {
        return false;
    }
    try {
        return engine.shouldPeriodicallyFlush();
    } catch (final AlreadyClosedException e) {
        // we are already closed, no need to flush or roll
        return false;
    }
}
/**
 * Tests whether or not the translog generation should be rolled to a new generation. This test is based on the size of the current
 * generation compared to the configured generation threshold size.
 *
 * @return {@code true} if the current generation should be rolled to a new generation
 */
boolean shouldRollTranslogGeneration() {
    final Engine engine = getEngineOrNull();
    if (engine == null) {
        return false;
    }
    try {
        return engine.translogManager().shouldRollTranslogGeneration();
    } catch (final AlreadyClosedException e) {
        // we are already closed, no need to flush or roll
        return false;
    }
}
/**
 * Pushes the current translog/soft-delete retention settings into the engine. When soft deletes
 * are enabled and peer recovery uses retention leases exclusively, translog retention is disabled.
 */
public void onSettingsChanged() {
    final Engine engine = getEngineOrNull();
    if (engine == null) {
        return;
    }
    final boolean disableTranslogRetention = indexSettings.isSoftDeleteEnabled() && useRetentionLeasesInPeerRecovery;
    final TimeValue retentionAge = disableTranslogRetention ? TimeValue.MINUS_ONE : indexSettings.getTranslogRetentionAge();
    final ByteSizeValue retentionSize = disableTranslogRetention ? new ByteSizeValue(-1) : indexSettings.getTranslogRetentionSize();
    engine.onSettingsChanged(retentionAge, retentionSize, indexSettings.getSoftDeleteRetentionOperations());
}
/**
 * Disables translog retention (in favor of retention leases) and trims the translog, asynchronously
 * on the generic thread pool since pruning delete tombstones can be expensive.
 */
private void turnOffTranslogRetention() {
    logger.debug(
        "turn off the translog retention for the replication group {} "
            + "as it starts using retention leases exclusively in peer recoveries",
        shardId
    );
    // Off to the generic threadPool as pruning the delete tombstones can be expensive.
    threadPool.generic().execute(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            // Failures after close are expected; only warn while the shard is alive.
            if (state != IndexShardState.CLOSED) {
                logger.warn("failed to turn off translog retention", e);
            }
        }
        @Override
        protected void doRun() {
            // Push the (now lease-based) retention settings into the engine, then trim.
            onSettingsChanged();
            trimTranslog();
        }
    });
}
/**
 * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed
 *
 * @return a {@link Closeable} that releases the retention lock when closed
 */
public Closeable acquireHistoryRetentionLock() {
    return getEngine().acquireHistoryRetentionLock();
}
/**
 * Creates a new history snapshot for reading operations since
 * the provided starting seqno (inclusive) and ending seqno (inclusive)
 * The returned snapshot can be retrieved from either Lucene index or translog files.
 *
 * @param reason        the reason/source of the request (used for diagnostics)
 * @param startingSeqNo first sequence number to include
 * @param endSeqNo      last sequence number to include
 * @param accurateCount whether the snapshot's operation count must be exact
 * @return a snapshot over the requested range; requires the full range to be present
 * @throws IOException if the snapshot cannot be created
 */
public Translog.Snapshot getHistoryOperations(String reason, long startingSeqNo, long endSeqNo, boolean accurateCount)
    throws IOException {
    return getEngine().newChangesSnapshot(reason, startingSeqNo, endSeqNo, true, accurateCount);
}
/**
 * Creates a new history snapshot from the translog instead of the lucene index. Required for cross cluster replication.
 * Use the recommended {@link #getHistoryOperations(String, long, long, boolean)} method for other cases.
 * This method should only be invoked if Segment Replication or Remote Store is not enabled.
 *
 * @param startingSeqNo first sequence number to include
 * @param endSeqNo      last sequence number to include
 * @return a translog-backed snapshot over the requested range (full range required)
 * @throws IOException if the snapshot cannot be created
 */
public Translog.Snapshot getHistoryOperationsFromTranslog(long startingSeqNo, long endSeqNo) throws IOException {
    assert indexSettings.isSegRepEnabledOrRemoteNode() == false
        : "unsupported operation for segment replication enabled indices or remote store backed indices";
    return getEngine().translogManager().newChangesSnapshot(startingSeqNo, endSeqNo, true);
}
/**
 * Checks if we have a completed history of operations since the given starting seqno (inclusive).
 * This method should be called after acquiring the retention lock; See {@link #acquireHistoryRetentionLock()}
 *
 * @param reason        the reason/source of the check (used for diagnostics)
 * @param startingSeqNo the first sequence number the history must cover
 * @return {@code true} if every operation since {@code startingSeqNo} is retained
 */
public boolean hasCompleteHistoryOperations(String reason, long startingSeqNo) {
    return getEngine().hasCompleteOperationHistory(reason, startingSeqNo);
}
/**
 * Gets the minimum retained sequence number for this engine.
 *
 * @return the minimum retained sequence number
 */
public long getMinRetainedSeqNo() {
    return getEngine().getMinRetainedSeqNo();
}
/**
 * Counts the number of history operations within the provided sequence numbers
 * @param source source of the requester (e.g., peer-recovery)
 * @param fromSeqNo from sequence number, included
 * @param toSeqNo to sequence number, included
 * @return number of history operations in the sequence number range
 * @throws IOException if the history cannot be read
 */
public int countNumberOfHistoryOperations(String source, long fromSeqNo, long toSeqNo) throws IOException {
    return getEngine().countNumberOfHistoryOperations(source, fromSeqNo, toSeqNo);
}
/**
 * Creates a new changes snapshot for reading operations whose seq_no are between {@code fromSeqNo}(inclusive)
 * and {@code toSeqNo}(inclusive). The caller has to close the returned snapshot after finishing the reading.
 *
 * @param source the source of the request
 * @param fromSeqNo the from seq_no (inclusive) to read
 * @param toSeqNo the to seq_no (inclusive) to read
 * @param requiredFullRange if {@code true} then {@link Translog.Snapshot#next()} will throw {@link IllegalStateException}
 *                          if any operation between {@code fromSeqNo} and {@code toSeqNo} is missing.
 *                          This parameter should be only enabled when the entire requesting range is below the global checkpoint.
 * @param accurateCount whether the snapshot's total operation count must be exact
 * @return the changes snapshot (caller must close it)
 * @throws IOException if the snapshot cannot be created
 */
public Translog.Snapshot newChangesSnapshot(
    String source,
    long fromSeqNo,
    long toSeqNo,
    boolean requiredFullRange,
    boolean accurateCount
) throws IOException {
    return getEngine().newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, accurateCount);
}
/**
 * Lists the Lucene segments of this shard's engine.
 *
 * @param verbose whether to include verbose per-segment details
 */
public List<Segment> segments(boolean verbose) {
    return getEngine().segments(verbose);
}
/** Returns the engine's history UUID, identifying this shard's operation history lineage. */
public String getHistoryUUID() {
    return getEngine().getHistoryUUID();
}
/** Returns the index-level event listener associated with this shard. */
public IndexEventListener getIndexEventListener() {
    return indexEventListener;
}
/** Enables indexing throttling on the engine; a concurrently closed engine is ignored. */
public void activateThrottling() {
    try {
        getEngine().activateThrottling();
    } catch (AlreadyClosedException ex) {
        // ignore
    }
}
/** Disables indexing throttling on the engine; a concurrently closed engine is ignored. */
public void deactivateThrottling() {
    try {
        getEngine().deactivateThrottling();
    } catch (AlreadyClosedException ex) {
        // ignore
    }
}
/**
 * Classifies a refresh failure: already-closed engines and shutdown-induced interrupts are
 * silently ignored; anything else is logged as a warning unless the shard is already closed.
 */
private void handleRefreshException(Exception e) {
    if (e instanceof AlreadyClosedException) {
        // engine was closed concurrently; nothing to report
        return;
    }
    if (e instanceof RefreshFailedEngineException) {
        final Throwable cause = ((RefreshFailedEngineException) e).getCause();
        final boolean shuttingDown = cause instanceof InterruptedException
            || cause instanceof ClosedByInterruptException
            || cause instanceof ThreadInterruptedException;
        if (shuttingDown) {
            // we are being shutdown; suppress the failure
            return;
        }
    }
    if (state != IndexShardState.CLOSED) {
        logger.warn("Failed to perform engine refresh", e);
    }
}
/**
 * Called when our shard is using too much heap and should move buffered indexed/deleted documents to disk.
 */
public void writeIndexingBuffer() {
    try {
        getEngine().writeIndexingBuffer();
    } catch (Exception e) {
        // Refresh-style failures (shutdown, closed engine) are classified and possibly logged.
        handleRefreshException(e);
    }
}
/**
 * Notifies the service to update the local checkpoint for the shard with the provided allocation ID. See
 * {@link ReplicationTracker#updateLocalCheckpoint(String, long)} for
 * details.
 * <p>
 * Only valid on an open shard in primary mode (assertion-checked).
 *
 * @param allocationId the allocation ID of the shard to update the local checkpoint for
 * @param checkpoint the local checkpoint for the shard
 */
public void updateLocalCheckpointForShard(final String allocationId, final long checkpoint) {
    assert assertPrimaryMode();
    verifyNotClosed();
    replicationTracker.updateLocalCheckpoint(allocationId, checkpoint);
}
/**
 * Update the local knowledge of the persisted global checkpoint for the specified allocation ID.
 * <p>
 * Only valid on an open shard in primary mode (assertion-checked).
 *
 * @param allocationId the allocation ID to update the global checkpoint for
 * @param globalCheckpoint the global checkpoint
 */
public void updateGlobalCheckpointForShard(final String allocationId, final long globalCheckpoint) {
    assert assertPrimaryMode();
    verifyNotClosed();
    replicationTracker.updateGlobalCheckpointForShard(allocationId, globalCheckpoint);
}
/**
 * Update the local knowledge of the visible global checkpoint for the specified allocation ID.
 *
 * @param allocationId      the allocation ID to update the visible checkpoint for
 * @param visibleCheckpoint the visible replication checkpoint
 */
public void updateVisibleCheckpointForShard(final String allocationId, final ReplicationCheckpoint visibleCheckpoint) {
    // Only an active primary tracks replica-visible replication checkpoints.
    if (this.isPrimaryMode() == false) {
        return;
    }
    verifyNotClosed();
    replicationTracker.updateVisibleCheckpointForShard(allocationId, visibleCheckpoint);
}
    /**
     * Fetch stats on segment replication.
     * @return {@link Tuple} V1 - TimeValue in ms - mean replication lag for this primary to its entire group,
     * V2 - Set of {@link SegmentReplicationShardStats} per shard in this primary's replication group.
     */
    public Set<SegmentReplicationShardStats> getReplicationStatsForTrackedReplicas() {
        // delegates to the replication tracker, which knows the current replication group
        return replicationTracker.getSegmentReplicationStats();
    }
public ReplicationStats getReplicationStats() {
if (indexSettings.isSegRepEnabledOrRemoteNode() && routingEntry().primary()) {
final Set<SegmentReplicationShardStats> stats = getReplicationStatsForTrackedReplicas();
long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L);
long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum();
long maxReplicationLag = stats.stream()
.mapToLong(SegmentReplicationShardStats::getCurrentReplicationLagMillis)
.max()
.orElse(0L);
return new ReplicationStats(maxBytesBehind, totalBytesBehind, maxReplicationLag);
}
return new ReplicationStats();
}
    /**
     * Add a global checkpoint listener. If the global checkpoint is equal to or above the global checkpoint the listener is waiting for,
     * then the listener will be notified immediately via an executor (so possibly not on the current thread). If the specified timeout
     * elapses before the listener is notified, the listener will be notified with an {@link TimeoutException}. A caller may pass null to
     * specify no timeout.
     *
     * @param waitingForGlobalCheckpoint the global checkpoint the listener is waiting for
     * @param listener the listener
     * @param timeout the timeout, or null for no timeout
     */
    public void addGlobalCheckpointListener(
        final long waitingForGlobalCheckpoint,
        final GlobalCheckpointListeners.GlobalCheckpointListener listener,
        final TimeValue timeout
    ) {
        this.globalCheckpointListeners.add(waitingForGlobalCheckpoint, listener, timeout);
    }
private void ensureSoftDeletesEnabled(String feature) {
if (indexSettings.isSoftDeleteEnabled() == false) {
String message = feature + " requires soft deletes but " + indexSettings.getIndex() + " does not have soft deletes enabled";
assert false : message;
throw new IllegalStateException(message);
}
}
    /**
     * Get all retention leases tracked on this shard.
     *
     * @return the retention leases
     */
    public RetentionLeases getRetentionLeases() {
        // non-expiring variant; v2 of the tuple carries the leases themselves
        return getRetentionLeases(false).v2();
    }
    /**
     * If the expire leases parameter is false, gets all retention leases tracked on this shard and otherwise first calculates
     * expiration of existing retention leases, and then gets all non-expired retention leases tracked on this shard. Note that only the
     * primary shard calculates which leases are expired, and if any have expired, syncs the retention leases to any replicas. If the
     * expire leases parameter is true, this replication tracker must be in primary mode.
     *
     * @param expireLeases whether to first expire outdated leases (primary mode only)
     * @return a tuple indicating whether or not any retention leases were expired, and the non-expired retention leases
     */
    public Tuple<Boolean, RetentionLeases> getRetentionLeases(final boolean expireLeases) {
        // expiration is a primary-only operation
        assert expireLeases == false || assertPrimaryMode();
        verifyNotClosed();
        return replicationTracker.getRetentionLeases(expireLeases);
    }
    /**
     * Returns retention lease stats for this shard, wrapping the currently tracked (non-expiring) leases.
     */
    public RetentionLeaseStats getRetentionLeaseStats() {
        verifyNotClosed();
        return new RetentionLeaseStats(getRetentionLeases());
    }
/**
* Adds a new retention lease.
*
* @param id the identifier of the retention lease
* @param retainingSequenceNumber the retaining sequence number
* @param source the source of the retention lease
* @param listener the callback when the retention lease is successfully added and synced to replicas
* @return the new retention lease
* @throws IllegalArgumentException if the specified retention lease already exists
*/
public RetentionLease addRetentionLease(
final String id,
final long retainingSequenceNumber,
final String source,
final ActionListener<ReplicationResponse> listener
) {
Objects.requireNonNull(listener);
assert assertPrimaryMode();
verifyNotClosed();
ensureSoftDeletesEnabled("retention leases");
try (Closeable ignore = acquireHistoryRetentionLock()) {
final long actualRetainingSequenceNumber = retainingSequenceNumber == RETAIN_ALL
? getMinRetainedSeqNo()
: retainingSequenceNumber;
return replicationTracker.addRetentionLease(id, actualRetainingSequenceNumber, source, listener);
} catch (final IOException e) {
throw new AssertionError(e);
}
}
/**
* Renews an existing retention lease.
*
* @param id the identifier of the retention lease
* @param retainingSequenceNumber the retaining sequence number
* @param source the source of the retention lease
* @return the renewed retention lease
* @throws IllegalArgumentException if the specified retention lease does not exist
*/
public RetentionLease renewRetentionLease(final String id, final long retainingSequenceNumber, final String source) {
assert assertPrimaryMode();
verifyNotClosed();
ensureSoftDeletesEnabled("retention leases");
try (Closeable ignore = acquireHistoryRetentionLock()) {
final long actualRetainingSequenceNumber = retainingSequenceNumber == RETAIN_ALL
? getMinRetainedSeqNo()
: retainingSequenceNumber;
return replicationTracker.renewRetentionLease(id, actualRetainingSequenceNumber, source);
} catch (final IOException e) {
throw new AssertionError(e);
}
}
    /**
     * Removes an existing retention lease.
     *
     * @param id the identifier of the retention lease
     * @param listener the callback when the retention lease is successfully removed and synced to replicas
     */
    public void removeRetentionLease(final String id, final ActionListener<ReplicationResponse> listener) {
        Objects.requireNonNull(listener);
        // lease removal is a primary-only operation and requires soft deletes
        assert assertPrimaryMode();
        verifyNotClosed();
        ensureSoftDeletesEnabled("retention leases");
        replicationTracker.removeRetentionLease(id, listener);
    }
    /**
     * Updates retention leases on a replica.
     *
     * @param retentionLeases the retention leases
     */
    public void updateRetentionLeasesOnReplica(final RetentionLeases retentionLeases) {
        // replicas receive the authoritative lease set from the primary and apply it wholesale
        assert assertReplicationTarget();
        verifyNotClosed();
        replicationTracker.updateRetentionLeasesOnReplica(retentionLeases);
    }
    /**
     * Loads the latest retention leases from their dedicated state file.
     *
     * @return the retention leases
     * @throws IOException if an I/O exception occurs reading the retention leases
     */
    public RetentionLeases loadRetentionLeases() throws IOException {
        verifyNotClosed();
        // leases are persisted alongside the shard state on disk
        return replicationTracker.loadRetentionLeases(path.getShardStatePath());
    }
    /**
     * Persists the current retention leases to their dedicated state file.
     *
     * @throws WriteStateException if an exception occurs writing the state file
     */
    public void persistRetentionLeases() throws WriteStateException {
        verifyNotClosed();
        // leases are persisted alongside the shard state on disk
        replicationTracker.persistRetentionLeases(path.getShardStatePath());
    }
    // Assertion helper: verifies the on-disk lease state matches the in-memory one. Intended for use in asserts only.
    public boolean assertRetentionLeasesPersisted() throws IOException {
        return replicationTracker.assertRetentionLeasesPersisted(path.getShardStatePath());
    }
    /**
     * Syncs the current retention leases to all replicas. Expired leases trigger an immediate (foreground) sync;
     * otherwise a background sync is used.
     */
    public void syncRetentionLeases() {
        assert assertPrimaryMode();
        verifyNotClosed();
        // keep peer-recovery retention leases fresh before the expiration check
        replicationTracker.renewPeerRecoveryRetentionLeases();
        // v1 of the tuple reports whether any lease expired during this call
        final Tuple<Boolean, RetentionLeases> retentionLeases = getRetentionLeases(true);
        if (retentionLeases.v1()) {
            logger.trace("syncing retention leases [{}] after expiration check", retentionLeases.v2());
            retentionLeaseSyncer.sync(
                shardId,
                shardRouting.allocationId().getId(),
                getPendingPrimaryTerm(),
                retentionLeases.v2(),
                ActionListener.wrap(
                    r -> {},
                    e -> logger.warn(
                        new ParameterizedMessage("failed to sync retention leases [{}] after expiration check", retentionLeases),
                        e
                    )
                )
            );
        } else {
            // nothing expired: a best-effort background sync suffices
            logger.trace("background syncing retention leases [{}] after expiration check", retentionLeases.v2());
            retentionLeaseSyncer.backgroundSync(
                shardId,
                shardRouting.allocationId().getId(),
                getPendingPrimaryTerm(),
                retentionLeases.v2()
            );
        }
    }
    /**
     * Called when the recovery process for a shard has opened the engine on the target shard. Ensures that the right data structures
     * have been set up locally to track local checkpoint information for the shard and that the shard is added to the replication group.
     *
     * @param allocationId the allocation ID of the shard for which recovery was initiated
     */
    public void initiateTracking(final String allocationId) {
        // only a primary can add a copy to its replication group
        assert assertPrimaryMode();
        replicationTracker.initiateTracking(allocationId);
    }
    /**
     * Marks the shard with the provided allocation ID as in-sync with the primary shard. See
     * {@link ReplicationTracker#markAllocationIdAsInSync(String, long)}
     * for additional details.
     *
     * @param allocationId the allocation ID of the shard to mark as in-sync
     * @param localCheckpoint the current local checkpoint on the shard
     * @throws InterruptedException if interrupted while waiting for the copy's checkpoint to catch up
     */
    public void markAllocationIdAsInSync(final String allocationId, final long localCheckpoint) throws InterruptedException {
        assert assertPrimaryMode();
        replicationTracker.markAllocationIdAsInSync(allocationId, localCheckpoint);
    }
    /**
     * Returns the persisted local checkpoint for the shard.
     *
     * @return the local checkpoint
     */
    public long getLocalCheckpoint() {
        // persisted (durable) checkpoint, as opposed to the processed one (see getProcessedLocalCheckpoint)
        return getEngine().getPersistedLocalCheckpoint();
    }
    /**
     * Fetch the latest checkpoint that has been processed but not necessarily persisted.
     * Also see {@link #getLocalCheckpoint()}.
     *
     * @return the processed local checkpoint
     */
    public long getProcessedLocalCheckpoint() {
        return getEngine().getProcessedLocalCheckpoint();
    }
    /**
     * Returns the global checkpoint for the shard.
     *
     * @return the global checkpoint
     */
    public long getLastKnownGlobalCheckpoint() {
        // in-memory value maintained by the replication tracker; may be ahead of what is persisted
        return replicationTracker.getGlobalCheckpoint();
    }
    /**
     * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint)
     *
     * @return the last synced global checkpoint
     */
    public long getLastSyncedGlobalCheckpoint() {
        return getEngine().getLastSyncedGlobalCheckpoint();
    }
    /**
     * Get the local knowledge of the global checkpoints for all in-sync allocation IDs.
     *
     * @return a map from allocation ID to the local knowledge of the global checkpoint for that allocation ID
     */
    public Map<String, Long> getInSyncGlobalCheckpoints() {
        // the in-sync set is only tracked by the primary
        assert assertPrimaryMode();
        verifyNotClosed();
        return replicationTracker.getInSyncGlobalCheckpoints();
    }
/**
* Syncs the global checkpoint to the replicas if the global checkpoint on at least one replica is behind the global checkpoint on the
* primary.
*/
public void maybeSyncGlobalCheckpoint(final String reason) {
verifyNotClosed();
assert shardRouting.primary() : "only call maybeSyncGlobalCheckpoint on primary shard";
if (replicationTracker.isPrimaryMode() == false) {
return;
}
assert assertPrimaryMode();
// only sync if there are no operations in flight, or when using async durability
final SeqNoStats stats = getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint());
final boolean asyncDurability = indexSettings().getTranslogDurability() == Durability.ASYNC;
if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint() || asyncDurability) {
final Map<String, Long> globalCheckpoints = getInSyncGlobalCheckpoints();
final long globalCheckpoint = replicationTracker.getGlobalCheckpoint();
// async durability means that the local checkpoint might lag (as it is only advanced on fsync)
// periodically ask for the newest local checkpoint by syncing the global checkpoint, so that ultimately the global
// checkpoint can be synced. Also take into account that a shard might be pending sync, which means that it isn't
// in the in-sync set just yet but might be blocked on waiting for its persisted local checkpoint to catch up to
// the global checkpoint.
final boolean syncNeeded = (asyncDurability
&& (stats.getGlobalCheckpoint() < stats.getMaxSeqNo() || replicationTracker.pendingInSync()))
// check if the persisted global checkpoint
|| StreamSupport.stream(globalCheckpoints.values().spliterator(), false).anyMatch(v -> v < globalCheckpoint);
// only sync if index is not closed and there is a shard lagging the primary
if (syncNeeded && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN) {
logger.trace("syncing global checkpoint for [{}]", reason);
globalCheckpointSyncer.run();
}
}
}
    /**
     * Returns the current replication group for the shard.
     *
     * @return the replication group
     */
    public ReplicationGroup getReplicationGroup() {
        assert assertPrimaryMode();
        verifyNotClosed();
        ReplicationGroup replicationGroup = replicationTracker.getReplicationGroup();
        // PendingReplicationActions is dependent on ReplicationGroup. Every time we expose ReplicationGroup,
        // ensure PendingReplicationActions is updated with the newest version to prevent races.
        pendingReplicationActions.accept(replicationGroup);
        return replicationGroup;
    }
    /**
     * Returns the pending replication actions for the shard.
     *
     * @return the pending replication actions
     */
    public PendingReplicationActions getPendingReplicationActions() {
        // pending replication actions only exist on the primary
        assert assertPrimaryMode();
        verifyNotClosed();
        return pendingReplicationActions;
    }
    /**
     * Updates the global checkpoint on a replica shard after it has been updated by the primary.
     *
     * @param globalCheckpoint the global checkpoint
     * @param reason the reason the global checkpoint was updated
     */
    public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final String reason) {
        assert assertReplicationTarget();
        final long localCheckpoint = getLocalCheckpoint();
        // An incoming global checkpoint ahead of our local checkpoint is dropped, not applied (see rationale below).
        if (globalCheckpoint > localCheckpoint) {
            /*
             * This can happen during recovery when the shard has started its engine but recovery is not finalized and is receiving global
             * checkpoint updates. However, since this shard is not yet contributing to calculating the global checkpoint, it can be the
             * case that the global checkpoint update from the primary is ahead of the local checkpoint on this shard. In this case, we
             * ignore the global checkpoint update. This can happen if we are in the translog stage of recovery. Prior to this, the engine
             * is not opened and this shard will not receive global checkpoint updates, and after this the shard will be contributing to
             * calculations of the global checkpoint. However, we can not assert that we are in the translog stage of recovery here as
             * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move
             * to recovery finalization, or even finished recovery before the update arrives here.
             * When remote translog is enabled for an index, replication operation is limited to primary term validation and does not
             * update local checkpoint at replica, so the local checkpoint at replica can be less than globalCheckpoint.
             */
            assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED)
                || indexSettings.isAssignedOnRemoteNode() : "supposedly in-sync shard copy received a global checkpoint ["
                    + globalCheckpoint
                    + "] "
                    + "that is higher than its local checkpoint ["
                    + localCheckpoint
                    + "]";
            return;
        }
        replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, reason);
    }
    /**
     * Updates the known allocation IDs and the local checkpoints for the corresponding allocations from a primary relocation source.
     * Called on the relocation target during primary hand-off; switches this shard into primary mode.
     *
     * @param primaryContext the sequence number context
     */
    public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) {
        assert shardRouting.primary() && shardRouting.isRelocationTarget()
            : "only primary relocation target can update allocation IDs from primary context: " + shardRouting;
        assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) : "primary context ["
            + primaryContext
            + "] does not contain relocation target ["
            + routingEntry()
            + "]";
        String allocationId = routingEntry().allocationId().getId();
        if (isRemoteStoreEnabled() || isMigratingToRemote()) {
            // For remote backed indexes, old primary may not have updated value of local checkpoint of new primary.
            // But the new primary is always updated with data in remote sore and is at par with old primary.
            // So, we can use a stricter check where local checkpoint of new primary is checked against that of old primary.
            allocationId = primaryContext.getRoutingTable().primaryShard().allocationId().getId();
        }
        // With async durability the persisted local checkpoint may legitimately lag the context's value.
        assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(allocationId).getLocalCheckpoint()
            || indexSettings().getTranslogDurability() == Durability.ASYNC : "local checkpoint ["
                + getLocalCheckpoint()
                + "] does not match checkpoint from primary context ["
                + primaryContext
                + "]";
        synchronized (mutex) {
            replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex
        }
        postActivatePrimaryMode();
    }
    /**
     * Hook run after this shard enters primary mode: for remote-backed shards, flushes the translog to the remote
     * store, then ensures peer recovery retention leases exist for all tracked copies.
     */
    private void postActivatePrimaryMode() {
        if (indexSettings.isAssignedOnRemoteNode()) {
            // We make sure to upload translog (even if it does not contain any operations) to remote translog.
            // This helps to get a consistent state in remote store where both remote segment store and remote
            // translog contains data.
            try {
                getEngine().translogManager().syncTranslog();
            } catch (IOException e) {
                // best effort: activation proceeds even if the initial remote translog sync fails
                logger.error("Failed to sync translog to remote from new primary", e);
            }
        }
        ensurePeerRecoveryRetentionLeasesExist();
    }
private void ensurePeerRecoveryRetentionLeasesExist() {
threadPool.generic()
.execute(
() -> replicationTracker.createMissingPeerRecoveryRetentionLeases(
ActionListener.wrap(
r -> logger.trace("created missing peer recovery retention leases"),
e -> logger.debug("failed creating missing peer recovery retention leases", e)
)
)
);
}
    /**
     * Check if there are any recoveries pending in-sync.
     *
     * @return {@code true} if there is at least one shard pending in-sync, otherwise false
     */
    public boolean pendingInSync() {
        // only meaningful on the primary, which tracks the in-sync set
        assert assertPrimaryMode();
        return replicationTracker.pendingInSync();
    }
    /**
     * Should be called for each no-op update operation to increment relevant statistics.
     */
    public void noopUpdate() {
        internalIndexingStats.noopUpdate();
    }
public void maybeCheckIndex() {
recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX);
if (Booleans.isTrue(checkIndexOnStartup) || "checksum".equals(checkIndexOnStartup)) {
try {
checkIndex();
} catch (IOException ex) {
throw new RecoveryFailedException(recoveryState, "check index failed", ex);
}
}
}
void checkIndex() throws IOException {
if (store.tryIncRef()) {
try {
doCheckIndex();
} catch (IOException e) {
store.markStoreCorrupted(e);
throw e;
} finally {
store.decRef();
}
}
}
    /**
     * Verifies the on-disk index: in "checksum" mode only file checksums of the latest commit are validated;
     * otherwise a full Lucene {@code CheckIndex} is run. Throws on any detected corruption and records the
     * verification time in the recovery state.
     */
    private void doCheckIndex() throws IOException {
        long timeNS = System.nanoTime();
        // nothing to verify for a brand-new (empty) shard
        if (!Lucene.indexExists(store.directory())) {
            return;
        }
        try (BytesStreamOutput os = new BytesStreamOutput(); PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name())) {
            if ("checksum".equals(checkIndexOnStartup)) {
                // physical verification only: verify all checksums for the latest commit
                IOException corrupt = null;
                MetadataSnapshot metadata = snapshotStoreMetadata();
                for (Map.Entry<String, StoreFileMetadata> entry : metadata.asMap().entrySet()) {
                    try {
                        Store.checkIntegrity(entry.getValue(), store.directory());
                        out.println("checksum passed: " + entry.getKey());
                    } catch (IOException exc) {
                        // keep checking the remaining files; remember the last failure to rethrow afterwards
                        out.println("checksum failed: " + entry.getKey());
                        exc.printStackTrace(out);
                        corrupt = exc;
                    }
                }
                out.flush();
                if (corrupt != null) {
                    logger.warn("check index [failure]\n{}", os.bytes().utf8ToString());
                    throw corrupt;
                }
            } else {
                // full checkindex
                final CheckIndex.Status status = store.checkIndex(out);
                out.flush();
                if (!status.clean) {
                    if (state == IndexShardState.CLOSED) {
                        // ignore if closed....
                        return;
                    }
                    logger.warn("check index [failure]\n{}", os.bytes().utf8ToString());
                    throw new IOException("index check failure");
                }
            }
            if (logger.isDebugEnabled()) {
                logger.debug("check index [success]\n{}", os.bytes().utf8ToString());
            }
        }
        recoveryState.getVerifyIndex().checkIndexTime(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - timeNS)));
    }
Engine getEngine() {
Engine engine = getEngineOrNull();
if (engine == null) {
throw new AlreadyClosedException("engine is closed");
}
return engine;
}
    /**
     * NOTE: returns null if engine is not yet started (e.g. recovery phase 1, copying over index files, is still running), or if engine is
     * closed.
     *
     * @return the current engine reference, or null
     */
    protected Engine getEngineOrNull() {
        return this.currentEngineReference.get();
    }
    /**
     * Kicks off recovery for this shard according to the routing entry's recovery source: a fresh/existing local
     * store, the remote store, a peer (the primary), a snapshot repository, or co-located local shards
     * (shrink/split/clone). Recovery work is dispatched asynchronously; the listener is notified of done/failure.
     */
    public void startRecovery(
        RecoveryState recoveryState,
        PeerRecoveryTargetService recoveryTargetService,
        RecoveryListener recoveryListener,
        RepositoriesService repositoriesService,
        Consumer<MappingMetadata> mappingUpdateConsumer,
        IndicesService indicesService
    ) {
        // TODO: Create a proper object to encapsulate the recovery context
        // all of the current methods here follow a pattern of:
        // resolve context which isn't really dependent on the local shards and then async
        // call some external method with this pointer.
        // with a proper recovery context object we can simply change this to:
        // startRecovery(RecoveryState recoveryState, ShardRecoverySource source ) {
        // markAsRecovery("from " + source.getShortDescription(), recoveryState);
        // threadPool.generic().execute() {
        // onFailure () { listener.failure() };
        // doRun() {
        // if (source.recover(this)) {
        // recoveryListener.onRecoveryDone(recoveryState);
        // }
        // }
        // }}
        // }
        logger.debug("startRecovery type={}", recoveryState.getRecoverySource().getType());
        assert recoveryState.getRecoverySource().equals(shardRouting.recoverySource());
        switch (recoveryState.getRecoverySource().getType()) {
            case EMPTY_STORE:
            case EXISTING_STORE:
                executeRecovery("from store", recoveryState, recoveryListener, this::recoverFromStore);
                break;
            case REMOTE_STORE:
                executeRecovery("from remote store", recoveryState, recoveryListener, l -> restoreFromRemoteStore(l));
                break;
            case PEER:
                try {
                    markAsRecovering("from " + recoveryState.getSourceNode(), recoveryState);
                    recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener);
                } catch (Exception e) {
                    // peer recovery could not even be started; fail the shard and report back
                    failShard("corrupted preexisting index", e);
                    recoveryListener.onFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
                }
                break;
            case SNAPSHOT:
                final SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) recoveryState.getRecoverySource();
                if (recoverySource.isSearchableSnapshot()) {
                    // searchable snapshots recover like a local store; data stays in the repository
                    executeRecovery("from snapshot (remote)", recoveryState, recoveryListener, this::recoverFromStore);
                } else if (recoverySource.remoteStoreIndexShallowCopy()) {
                    final String repo = recoverySource.snapshot().getRepository();
                    executeRecovery(
                        "from snapshot and remote store",
                        recoveryState,
                        recoveryListener,
                        l -> restoreFromSnapshotAndRemoteStore(repositoriesService.repository(repo), repositoriesService, l)
                    );
                    // indicesService.indexService(shardRouting.shardId().getIndex()).addMetadataListener();
                } else {
                    final String repo = recoverySource.snapshot().getRepository();
                    executeRecovery(
                        "from snapshot",
                        recoveryState,
                        recoveryListener,
                        l -> restoreFromRepository(repositoriesService.repository(repo), l)
                    );
                }
                break;
            case LOCAL_SHARDS:
                final IndexMetadata indexMetadata = indexSettings().getIndexMetadata();
                final Index resizeSourceIndex = indexMetadata.getResizeSourceIndex();
                final List<IndexShard> startedShards = new ArrayList<>();
                final IndexService sourceIndexService = indicesService.indexService(resizeSourceIndex);
                final Set<ShardId> requiredShards;
                final int numShards;
                if (sourceIndexService != null) {
                    requiredShards = IndexMetadata.selectRecoverFromShards(
                        shardId().id(),
                        sourceIndexService.getMetadata(),
                        indexMetadata.getNumberOfShards()
                    );
                    // collect the required source shards that are already started on this node
                    for (IndexShard shard : sourceIndexService) {
                        if (shard.state() == IndexShardState.STARTED && requiredShards.contains(shard.shardId())) {
                            startedShards.add(shard);
                        }
                    }
                    numShards = requiredShards.size();
                } else {
                    // source index is not allocated on this node at all
                    numShards = -1;
                    requiredShards = Collections.emptySet();
                }
                if (numShards == startedShards.size()) {
                    assert requiredShards.isEmpty() == false;
                    executeRecovery(
                        "from local shards",
                        recoveryState,
                        recoveryListener,
                        l -> recoverFromLocalShards(
                            mappingUpdateConsumer,
                            startedShards.stream().filter((s) -> requiredShards.contains(s.shardId())).collect(Collectors.toList()),
                            l
                        )
                    );
                } else {
                    // cannot recover yet: either the source index is missing locally or some source shards are not started
                    final RuntimeException e;
                    if (numShards == -1) {
                        e = new IndexNotFoundException(resizeSourceIndex);
                    } else {
                        e = new IllegalStateException(
                            "not all required shards of index "
                                + resizeSourceIndex
                                + " are started yet, expected "
                                + numShards
                                + " found "
                                + startedShards.size()
                                + " can't recover shard "
                                + shardId()
                        );
                    }
                    throw e;
                }
                break;
            default:
                throw new IllegalArgumentException("Unknown recovery source " + recoveryState.getRecoverySource());
        }
    }
    /**
     * Marks the shard as recovering and runs the given recovery action on the generic thread pool, reporting
     * success or failure to the listener.
     *
     * @param reason human-readable recovery reason, used for logging
     * @param recoveryState the recovery state to pass through to the listener
     * @param recoveryListener notified on done (when the action reports {@code true}) or on failure
     * @param action the recovery action; its listener receives {@code true} when recovery completed
     */
    private void executeRecovery(
        String reason,
        RecoveryState recoveryState,
        RecoveryListener recoveryListener,
        CheckedConsumer<ActionListener<Boolean>, Exception> action
    ) {
        markAsRecovering(reason, recoveryState); // mark the shard as recovering on the cluster state thread
        threadPool.generic().execute(ActionRunnable.wrap(ActionListener.wrap(r -> {
            if (r) {
                recoveryListener.onDone(recoveryState);
            }
        }, e -> recoveryListener.onFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true)), action));
    }
    /**
     * Returns whether the shard is a relocated primary, i.e. not in charge anymore of replicating changes (see {@link ReplicationTracker}).
     *
     * @return true if the primary hand-off has completed
     */
    public boolean isRelocatedPrimary() {
        assert shardRouting.primary() : "only call isRelocatedPrimary on primary shard";
        return replicationTracker.isRelocated();
    }
    /**
     * Adds a peer recovery retention lease for the given node, retaining history from the provided global checkpoint.
     *
     * @param nodeId the node the lease is for
     * @param globalCheckpoint the checkpoint from which history must be retained
     * @param listener notified once the lease is synced to replicas
     * @return the new retention lease
     */
    public RetentionLease addPeerRecoveryRetentionLease(
        String nodeId,
        long globalCheckpoint,
        ActionListener<ReplicationResponse> listener
    ) {
        assert assertPrimaryMode();
        // only needed for BWC reasons involving rolling upgrades from versions that do not support PRRLs:
        assert indexSettings.isSoftDeleteEnabled() == false;
        return replicationTracker.addPeerRecoveryRetentionLease(nodeId, globalCheckpoint, listener);
    }
    /**
     * Clones this shard's own peer recovery retention lease for the given node, so a recovering copy on that node
     * retains the same history.
     */
    public RetentionLease cloneLocalPeerRecoveryRetentionLease(String nodeId, ActionListener<ReplicationResponse> listener) {
        assert assertPrimaryMode();
        return replicationTracker.cloneLocalPeerRecoveryRetentionLease(nodeId, listener);
    }
    /**
     * Removes the peer recovery retention lease held for the given node.
     */
    public void removePeerRecoveryRetentionLease(String nodeId, ActionListener<ReplicationResponse> listener) {
        assert assertPrimaryMode();
        replicationTracker.removePeerRecoveryRetentionLease(nodeId, listener);
    }
    /**
     * Returns a list of retention leases for peer recovery installed in this shard copy.
     *
     * @return the peer recovery retention leases
     */
    public List<RetentionLease> getPeerRecoveryRetentionLeases() {
        return replicationTracker.getPeerRecoveryRetentionLeases();
    }
    // Whether peer recovery on this shard relies on retention leases (vs. legacy soft-delete-less history retention).
    public boolean useRetentionLeasesInPeerRecovery() {
        return useRetentionLeasesInPeerRecovery;
    }
private SafeCommitInfo getSafeCommitInfo() {
final Engine engine = getEngineOrNull();
return engine == null ? SafeCommitInfo.EMPTY : engine.getSafeCommitInfo();
}
class ShardEventListener implements Engine.EventListener {
private final CopyOnWriteArrayList<Consumer<ShardFailure>> delegates = new CopyOnWriteArrayList<>();
// called by the current engine
@Override
public void onFailedEngine(String reason, @Nullable Exception failure) {
final ShardFailure shardFailure = new ShardFailure(shardRouting, reason, failure);
for (Consumer<ShardFailure> listener : delegates) {
try {
listener.accept(shardFailure);
} catch (Exception inner) {
inner.addSuppressed(failure);
logger.warn("exception while notifying engine failure", inner);
}
}
}
}
    /**
     * Writes the shard state metadata file when the routing information that it captures (primary flag or
     * allocation ID) has changed since the last persisted routing; otherwise skips the write.
     *
     * @param shardPath the shard's on-disk path
     * @param indexSettings the index settings (for UUID and searchable-snapshot detection)
     * @param newRouting the routing to persist; must not be null
     * @param currentRouting the previously persisted routing, or null on first write
     * @param logger shard logger
     * @throws IOException if writing the state file fails
     */
    private static void persistMetadata(
        final ShardPath shardPath,
        final IndexSettings indexSettings,
        final ShardRouting newRouting,
        final @Nullable ShardRouting currentRouting,
        final Logger logger
    ) throws IOException {
        assert newRouting != null : "newRouting must not be null";
        // only persist metadata if routing information that is persisted in shard state metadata actually changed
        final ShardId shardId = newRouting.shardId();
        if (currentRouting == null
            || currentRouting.primary() != newRouting.primary()
            || currentRouting.allocationId().equals(newRouting.allocationId()) == false) {
            assert currentRouting == null || currentRouting.isSameAllocation(newRouting);
            final String writeReason;
            if (currentRouting == null) {
                writeReason = "initial state with allocation id [" + newRouting.allocationId() + "]";
            } else {
                writeReason = "routing changed from " + currentRouting + " to " + newRouting;
            }
            logger.trace("{} writing shard state, reason [{}]", shardId, writeReason);
            // searchable-snapshot shards keep their data in the repository rather than on local disk
            final ShardStateMetadata.IndexDataLocation indexDataLocation = IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.exists(
                indexSettings.getSettings()
            ) ? ShardStateMetadata.IndexDataLocation.REMOTE : ShardStateMetadata.IndexDataLocation.LOCAL;
            final ShardStateMetadata newShardStateMetadata = new ShardStateMetadata(
                newRouting.primary(),
                indexSettings.getUUID(),
                newRouting.allocationId(),
                indexDataLocation
            );
            ShardStateMetadata.FORMAT.writeAndCleanup(newShardStateMetadata, shardPath.getShardStatePath());
        } else {
            logger.trace("{} skip writing shard state, has been written before", shardId);
        }
    }
    // Resolves the document mapper, auto-creating the dynamic mapping if none exists yet.
    private DocumentMapperForType docMapper() {
        return mapperService.documentMapperWithAutoCreate();
    }
    /**
     * Builds the engine configuration for this shard: wires up the warmer, refresh listeners (metrics,
     * segment-replication checkpointing, remote-store upload), translog factory and read-only-replica mode.
     *
     * @param globalCheckpointSupplier supplier of the current global checkpoint for the engine
     * @throws IOException if building the configuration fails
     */
    private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) throws IOException {
        final Sort indexSort = indexSortSupplier.get();
        final Engine.Warmer warmer = reader -> {
            assert Thread.holdsLock(mutex) == false : "warming engine under mutex";
            assert reader != null;
            if (this.warmer != null) {
                this.warmer.warm(reader);
            }
        };
        // rebuild the internal refresh listener chain from scratch on every engine (re)creation
        internalRefreshListener.clear();
        internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric));
        if (indexSettings.isSegRepEnabledOrRemoteNode()) {
            internalRefreshListener.add(new ReplicationCheckpointUpdater());
        }
        if (this.checkpointPublisher != null && shardRouting.primary() && indexSettings.isSegRepLocalEnabled()) {
            internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher));
        }
        if (isRemoteStoreEnabled() || isMigratingToRemote()) {
            // uploads new segments to the remote store after each refresh
            internalRefreshListener.add(
                new RemoteStoreRefreshListener(
                    this,
                    this.checkpointPublisher,
                    remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId())
                )
            );
        }
        /*
           With segment replication enabled for primary relocation, recover replica shard initially as read only and
           change to a writeable engine during relocation handoff after a round of segment replication.
         */
        boolean isReadOnlyReplica = indexSettings.isSegRepEnabledOrRemoteNode()
            && (shardRouting.primary() == false
                || (shardRouting.isRelocationTarget() && recoveryState.getStage() != RecoveryState.Stage.FINALIZE));
        // For mixed mode, when relocating from doc rep to remote node, we use a writeable engine
        if (shouldSeedRemoteStore()) {
            isReadOnlyReplica = false;
        }
        // NOTE: argument order below must match the factory signature exactly
        return this.engineConfigFactory.newEngineConfig(
            shardId,
            threadPool,
            indexSettings,
            warmer,
            store,
            indexSettings.getMergePolicy(isTimeSeriesIndex),
            mapperService != null ? mapperService.indexAnalyzer() : null,
            similarityService.similarity(mapperService),
            engineConfigFactory.newCodecServiceOrDefault(indexSettings, mapperService, logger, codecService),
            shardEventListener,
            indexCache != null ? indexCache.query() : null,
            cachingPolicy,
            translogConfig,
            IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
            Arrays.asList(refreshListeners, refreshPendingLocationListener),
            internalRefreshListener,
            indexSort,
            circuitBreakerService,
            globalCheckpointSupplier,
            replicationTracker::getRetentionLeases,
            this::getOperationPrimaryTerm,
            tombstoneDocSupplier(),
            isReadOnlyReplica,
            this::enableUploadToRemoteTranslog,
            translogFactorySupplier.apply(indexSettings, shardRouting),
            isTimeSeriesDescSortOptimizationEnabled() ? DataStream.TIMESERIES_LEAF_SORTER : null // DESC @timestamp default order for
            // timeseries
        );
    }
    // True only for the primary copy of a shard that has a remote store configured.
    private boolean isRemoteStoreEnabled() {
        return (remoteStore != null && shardRouting.primary());
    }
    // Whether this shard's translog is backed by a remote translog store.
    public boolean isRemoteTranslogEnabled() {
        return indexSettings() != null && (indexSettings().isRemoteTranslogStoreEnabled());
    }
    /**
     * This checks if we are in state to upload to remote store. Until the cluster-manager informs the shard through
     * cluster state, the shard will not be in STARTED state. This method is used to prevent pre-emptive segment or
     * translog uploads.
     *
     * @return true when this copy is in primary mode and the shard is STARTED
     */
    public boolean isStartedPrimary() {
        return (getReplicationTracker().isPrimaryMode() && state() == IndexShardState.STARTED);
    }
/**
 * Gates translog uploads to the remote store: allowed once the shard is a started primary, or — while
 * seeding a remote store during migration — once at least one remote segment upload has happened.
 */
public boolean enableUploadToRemoteTranslog() {
    if (isStartedPrimary()) {
        return true;
    }
    return shouldSeedRemoteStore() && hasOneRemoteSegmentSyncHappened();
}
/**
 * Checks whether at least one segments_N (commit) file has been uploaded to the remote segment store.
 * Used during migration: remote translog uploads are only permitted after the first remote segment upload.
 *
 * @return true if any uploaded file name starts with "segments"
 */
private boolean hasOneRemoteSegmentSyncHappened() {
    assert indexSettings.isAssignedOnRemoteNode();
    // We upload remote translog only after one remote segment upload in case of migration.
    // anyMatch short-circuits on the first hit, unlike the previous forEach + AtomicBoolean accumulation.
    RemoteSegmentStoreDirectory rd = getRemoteDirectory();
    return rd.getSegmentsUploadedToRemoteStore().keySet().stream().anyMatch(file -> file.startsWith("segments"));
}
/**
 * @return true if segment reverse search optimization is enabled for time series based workload.
 */
public boolean isTimeSeriesDescSortOptimizationEnabled() {
    // An explicit index sort takes precedence: never change segment order in that case.
    return getIndexSort() == null && isTimeSeriesIndex;
}
/**
 * @return True if settings indicate this shard is backed by a remote snapshot, false otherwise.
 */
public boolean isRemoteSnapshot() {
    final IndexSettings settings = indexSettings;
    return settings != null && settings.isRemoteSnapshot();
}
/**
 * Acquire a primary operation permit whenever the shard is ready for indexing. If a permit is directly available, the provided
 * ActionListener will be called on the calling thread. During relocation hand-off, permit acquisition can be delayed. The provided
 * ActionListener will then be called using the provided executor.
 *
 * @param debugInfo an extra information that can be useful when tracing an unreleased permit. When assertions are enabled
 *                  the tracing will capture the supplied object's {@link Object#toString()} value. Otherwise the object
 *                  isn't used
 */
public void acquirePrimaryOperationPermit(ActionListener<Releasable> onPermitAcquired, String executorOnDelay, Object debugInfo) {
    // Delegate to the four-argument variant without forcing execution.
    acquirePrimaryOperationPermit(onPermitAcquired, executorOnDelay, debugInfo, false);
}
/**
 * Acquire a primary operation permit, optionally forcing execution of the delayed listener even on a
 * saturated executor. The listener is wrapped so that it fails fast when the shard has left primary mode.
 */
public void acquirePrimaryOperationPermit(
    ActionListener<Releasable> onPermitAcquired,
    String executorOnDelay,
    Object debugInfo,
    boolean forceExecution
) {
    verifyNotClosed();
    assert shardRouting.primary() : "acquirePrimaryOperationPermit should only be called on primary shard: " + shardRouting;
    final ActionListener<Releasable> wrapped = wrapPrimaryOperationPermitListener(onPermitAcquired);
    indexShardOperationPermits.acquire(wrapped, executorOnDelay, forceExecution, debugInfo);
}
/**
 * Acquire all primary operation permits. Once all permits are acquired, the provided ActionListener is called.
 * It is the responsibility of the caller to close the {@link Releasable}.
 */
public void acquireAllPrimaryOperationsPermits(final ActionListener<Releasable> onPermitAcquired, final TimeValue timeout) {
    verifyNotClosed();
    assert shardRouting.primary() : "acquireAllPrimaryOperationsPermits should only be called on primary shard: " + shardRouting;
    final ActionListener<Releasable> wrapped = wrapPrimaryOperationPermitListener(onPermitAcquired);
    asyncBlockOperations(wrapped, timeout.duration(), timeout.timeUnit());
}
/**
 * Wraps a permit listener so that, once the permit is granted, the action only proceeds if the shard is
 * still in primary mode; otherwise the permit is released and the listener fails with
 * {@link ShardNotInPrimaryModeException}.
 *
 * @param listener the listener to wrap
 * @return the wrapped listener
 */
private ActionListener<Releasable> wrapPrimaryOperationPermitListener(final ActionListener<Releasable> listener) {
    return ActionListener.delegateFailure(listener, (delegate, permit) -> {
        if (replicationTracker.isPrimaryMode() == false) {
            // Release the permit before failing — the caller never sees it and cannot close it.
            permit.close();
            delegate.onFailure(new ShardNotInPrimaryModeException(shardId, state));
        } else {
            delegate.onResponse(permit);
        }
    });
}
/**
 * Blocks all operations on this shard by acquiring every permit. Pending refresh listeners are forced
 * first (via {@code refreshListeners.forceRefreshes()}) so that writers blocked behind the permit block
 * are not also waiting on a refresh; the force-refresh handle is closed on success, failure, or a
 * synchronous throw from the permit layer.
 */
private void asyncBlockOperations(ActionListener<Releasable> onPermitAcquired, long timeout, TimeUnit timeUnit) {
    final Releasable forceRefreshes = refreshListeners.forceRefreshes();
    // Wrap the listener so the force-refresh handle is released on either async outcome.
    final ActionListener<Releasable> wrappedListener = ActionListener.wrap(r -> {
        forceRefreshes.close();
        onPermitAcquired.onResponse(r);
    }, e -> {
        forceRefreshes.close();
        onPermitAcquired.onFailure(e);
    });
    try {
        indexShardOperationPermits.asyncBlockOperations(wrappedListener, timeout, timeUnit);
    } catch (Exception e) {
        // Synchronous failure: the wrapped listener will never be invoked, so close here.
        forceRefreshes.close();
        throw e;
    }
}
/**
 * Runs the specified runnable under a primary permit, releasing the permit via try-with-resources once
 * the runnable completes, and otherwise invoking the failure callback. This is a convenience for
 * {@link #acquirePrimaryOperationPermit(ActionListener, String, Object)}.
 *
 * @param runnable the runnable to execute under permit
 * @param onFailure the callback on failure
 * @param executorOnDelay the executor to execute the runnable on if permit acquisition is blocked
 * @param debugInfo debug info
 */
public void runUnderPrimaryPermit(
    final Runnable runnable,
    final Consumer<Exception> onFailure,
    final String executorOnDelay,
    final Object debugInfo
) {
    verifyNotClosed();
    assert shardRouting.primary() : "runUnderPrimaryPermit should only be called on primary shard but was " + shardRouting;
    final ActionListener<Releasable> permitListener = ActionListener.wrap(permit -> {
        try (Releasable ignored = permit) {
            runnable.run();
        }
    }, onFailure);
    acquirePrimaryOperationPermit(permitListener, executorOnDelay, debugInfo);
}
/**
 * Advances {@code pendingPrimaryTerm} to {@code newPrimaryTerm} and, once all operations are blocked,
 * applies the new term to the replication tracker and runs {@code onBlocked}. Must be called under
 * {@code mutex}. When {@code combineWithAction} is non-null, the all-permits acquisition is shared with
 * that action: the permit is handed to it instead of being released here.
 */
private <E extends Exception> void bumpPrimaryTerm(
    final long newPrimaryTerm,
    final CheckedRunnable<E> onBlocked,
    @Nullable ActionListener<Releasable> combineWithAction
) {
    assert Thread.holdsLock(mutex);
    assert newPrimaryTerm > pendingPrimaryTerm || (newPrimaryTerm >= pendingPrimaryTerm && combineWithAction != null);
    assert getOperationPrimaryTerm() <= pendingPrimaryTerm;
    // Released only after pendingPrimaryTerm is bumped below, so the async block sees the new term.
    final CountDownLatch termUpdated = new CountDownLatch(1);
    asyncBlockOperations(new ActionListener<Releasable>() {
        @Override
        public void onFailure(final Exception e) {
            try {
                innerFail(e);
            } finally {
                if (combineWithAction != null) {
                    combineWithAction.onFailure(e);
                }
            }
        }
        // Fails the shard; an already-closed shard is fine and ignored.
        private void innerFail(final Exception e) {
            try {
                failShard("exception during primary term transition", e);
            } catch (AlreadyClosedException ace) {
                // ignore, shard is already closed
            }
        }
        @Override
        public void onResponse(final Releasable releasable) {
            final RunOnce releaseOnce = new RunOnce(releasable::close);
            try {
                assert getOperationPrimaryTerm() <= pendingPrimaryTerm;
                // Wait until the caller has published the new pendingPrimaryTerm.
                termUpdated.await();
                // indexShardOperationPermits doesn't guarantee that async submissions are executed
                // in the order submitted. We need to guard against another term bump
                if (getOperationPrimaryTerm() < newPrimaryTerm) {
                    replicationTracker.setOperationPrimaryTerm(newPrimaryTerm);
                    onBlocked.run();
                }
            } catch (final Exception e) {
                if (combineWithAction == null) {
                    // otherwise leave it to combineWithAction to release the permit
                    releaseOnce.run();
                }
                innerFail(e);
            } finally {
                if (combineWithAction != null) {
                    // Hand the (still-held) permit to the combined action, even after a failure above.
                    combineWithAction.onResponse(releasable);
                } else {
                    releaseOnce.run();
                }
            }
        }
    }, 30, TimeUnit.MINUTES);
    pendingPrimaryTerm = newPrimaryTerm;
    termUpdated.countDown();
}
/**
 * Acquire a replica operation permit whenever the shard is ready for indexing (see
 * {@link #acquirePrimaryOperationPermit(ActionListener, String, Object)}). If the given primary term is lower than the one in
 * {@link #shardRouting}, the {@link ActionListener#onFailure(Exception)} method of the provided listener is invoked with an
 * {@link IllegalStateException}. If permit acquisition is delayed, the listener will be invoked on the executor with the specified
 * name.
 *
 * @param opPrimaryTerm              the operation primary term
 * @param globalCheckpoint           the global checkpoint associated with the request
 * @param maxSeqNoOfUpdatesOrDeletes the max seq_no of updates (index operations overwrite Lucene) or deletes captured on the primary
 *                                   after this replication request was executed on it (see {@link #getMaxSeqNoOfUpdatesOrDeletes()}
 * @param onPermitAcquired           the listener for permit acquisition
 * @param executorOnDelay            the name of the executor to invoke the listener on if permit acquisition is delayed
 * @param debugInfo                  an extra information that can be useful when tracing an unreleased permit. When assertions are
 *                                   enabled the tracing will capture the supplied object's {@link Object#toString()} value.
 *                                   Otherwise the object isn't used
 */
public void acquireReplicaOperationPermit(
    final long opPrimaryTerm,
    final long globalCheckpoint,
    final long maxSeqNoOfUpdatesOrDeletes,
    final ActionListener<Releasable> onPermitAcquired,
    final String executorOnDelay,
    final Object debugInfo
) {
    innerAcquireReplicaOperationPermit(
        opPrimaryTerm,
        globalCheckpoint,
        maxSeqNoOfUpdatesOrDeletes,
        onPermitAcquired,
        false,
        (listener) -> indexShardOperationPermits.acquire(listener, executorOnDelay, true, debugInfo)
    );
}
/**
 * Acquire all replica operation permits whenever the shard is ready for indexing (see
 * {@link #acquireAllPrimaryOperationsPermits(ActionListener, TimeValue)}. If the given primary term is lower than the one in
 * {@link #shardRouting}, the {@link ActionListener#onFailure(Exception)} method of the provided listener is invoked with an
 * {@link IllegalStateException}.
 *
 * @param opPrimaryTerm              the operation primary term
 * @param globalCheckpoint           the global checkpoint associated with the request
 * @param maxSeqNoOfUpdatesOrDeletes the max seq_no of updates (index operations overwrite Lucene) or deletes captured on the primary
 *                                   after this replication request was executed on it (see {@link #getMaxSeqNoOfUpdatesOrDeletes()}
 * @param onPermitAcquired           the listener for permit acquisition
 * @param timeout                    the maximum time to wait for the in-flight operations block
 */
public void acquireAllReplicaOperationsPermits(
    final long opPrimaryTerm,
    final long globalCheckpoint,
    final long maxSeqNoOfUpdatesOrDeletes,
    final ActionListener<Releasable> onPermitAcquired,
    final TimeValue timeout
) {
    innerAcquireReplicaOperationPermit(
        opPrimaryTerm,
        globalCheckpoint,
        maxSeqNoOfUpdatesOrDeletes,
        onPermitAcquired,
        true,
        listener -> asyncBlockOperations(listener, timeout.duration(), timeout.timeUnit())
    );
}
/**
 * Shared implementation for replica permit acquisition (single or all permits). Rejects operations whose
 * primary term is stale, advances the global checkpoint and max-seq-no-of-updates on success, and — when a
 * newer primary term is observed — bumps the term under {@code mutex} before (or combined with) executing
 * the operation.
 */
private void innerAcquireReplicaOperationPermit(
    final long opPrimaryTerm,
    final long globalCheckpoint,
    final long maxSeqNoOfUpdatesOrDeletes,
    final ActionListener<Releasable> onPermitAcquired,
    final boolean allowCombineOperationWithPrimaryTermUpdate,
    final Consumer<ActionListener<Releasable>> operationExecutor
) {
    verifyNotClosed();
    // This listener is used for the execution of the operation. If the operation requires all the permits for its
    // execution and the primary term must be updated first, we can combine the operation execution with the
    // primary term update. Since indexShardOperationPermits doesn't guarantee that async submissions are executed
    // in the order submitted, combining both operations ensure that the term is updated before the operation is
    // executed. It also has the side effect of acquiring all the permits one time instead of two.
    final ActionListener<Releasable> operationListener = ActionListener.delegateFailure(
        onPermitAcquired,
        (delegatedListener, releasable) -> {
            if (opPrimaryTerm < getOperationPrimaryTerm()) {
                // Stale primary term: release the permit and reject the operation.
                releasable.close();
                final String message = String.format(
                    Locale.ROOT,
                    "%s operation primary term [%d] is too old (current [%d])",
                    shardId,
                    opPrimaryTerm,
                    getOperationPrimaryTerm()
                );
                delegatedListener.onFailure(new IllegalStateException(message));
            } else {
                assert assertReplicationTarget();
                try {
                    updateGlobalCheckpointOnReplica(globalCheckpoint, "operation");
                    advanceMaxSeqNoOfUpdatesOrDeletes(maxSeqNoOfUpdatesOrDeletes);
                } catch (Exception e) {
                    releasable.close();
                    delegatedListener.onFailure(e);
                    return;
                }
                delegatedListener.onResponse(releasable);
            }
        }
    );
    // Double-checked under mutex: the cheap unsynchronized check first, then re-check while holding the lock.
    if (requirePrimaryTermUpdate(opPrimaryTerm, allowCombineOperationWithPrimaryTermUpdate)) {
        synchronized (mutex) {
            if (requirePrimaryTermUpdate(opPrimaryTerm, allowCombineOperationWithPrimaryTermUpdate)) {
                final IndexShardState shardState = state();
                // only roll translog and update primary term if shard has made it past recovery
                // Having a new primary term here means that the old primary failed and that there is a new primary, which again
                // means that the cluster-manager will fail this shard as all initializing shards are failed when a primary is selected
                // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint
                if (shardState != IndexShardState.POST_RECOVERY && shardState != IndexShardState.STARTED) {
                    throw new IndexShardNotStartedException(shardId, shardState);
                }
                bumpPrimaryTerm(opPrimaryTerm, () -> {
                    updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition");
                    final long currentGlobalCheckpoint = getLastKnownGlobalCheckpoint();
                    final long maxSeqNo = seqNoStats().getMaxSeqNo();
                    logger.info(
                        "detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]",
                        opPrimaryTerm,
                        currentGlobalCheckpoint,
                        maxSeqNo
                    );
                    // With Segment Replication enabled, we never want to reset a replica's engine unless
                    // it is promoted to primary.
                    if (currentGlobalCheckpoint < maxSeqNo && indexSettings.isSegRepEnabledOrRemoteNode() == false) {
                        resetEngineToGlobalCheckpoint();
                    } else {
                        getEngine().translogManager().rollTranslogGeneration();
                    }
                }, allowCombineOperationWithPrimaryTermUpdate ? operationListener : null);
                if (allowCombineOperationWithPrimaryTermUpdate) {
                    logger.debug("operation execution has been combined with primary term update");
                    return;
                }
            }
        }
    }
    assert opPrimaryTerm <= pendingPrimaryTerm : "operation primary term ["
        + opPrimaryTerm
        + "] should be at most ["
        + pendingPrimaryTerm
        + "]";
    operationExecutor.accept(operationListener);
}
/**
 * Decides whether an incoming replica operation must trigger a primary term bump: always when its term
 * exceeds the pending term, and additionally (for all-permit acquisitions) when it exceeds the current
 * operation primary term.
 */
private boolean requirePrimaryTermUpdate(final long opPrimaryTerm, final boolean allPermits) {
    if (opPrimaryTerm > pendingPrimaryTerm) {
        return true;
    }
    return allPermits && opPrimaryTerm > getOperationPrimaryTerm();
}
// Sentinel returned by getActiveOperationsCount() while all permits are held (operations blocked).
public static final int OPERATIONS_BLOCKED = -1;
/**
 * Obtain the active operation count, or {@link IndexShard#OPERATIONS_BLOCKED} if all permits are held (even if there are
 * outstanding operations in flight).
 *
 * @return the active operation count, or {@link IndexShard#OPERATIONS_BLOCKED} when all permits are held.
 */
public int getActiveOperationsCount() {
    return indexShardOperationPermits.getActiveOperationsCount();
}
/**
 * @return a list describing each permit that wasn't released yet. The description consists of the debugInfo supplied
 *         when the permit was acquired plus a stack trace that was captured when the permit was requested.
 */
public List<String> getActiveOperations() {
    return indexShardOperationPermits.getActiveOperations();
}
// Processor that batches translog fsync requests; built by createTranslogSyncProcessor below.
private final AsyncIOProcessor<Translog.Location> translogSyncProcessor;
/**
 * Builds the async processor used to batch translog sync requests. When {@code bufferAsyncIoProcessor}
 * is true, syncs are additionally buffered for {@code bufferIntervalSupplier} on the translog-sync
 * thread pool; otherwise a plain {@link AsyncIOProcessor} is used.
 */
private static AsyncIOProcessor<Translog.Location> createTranslogSyncProcessor(
    Logger logger,
    ThreadPool threadPool,
    Supplier<Engine> engineSupplier,
    boolean bufferAsyncIoProcessor,
    Supplier<TimeValue> bufferIntervalSupplier
) {
    assert bufferAsyncIoProcessor == false || Objects.nonNull(bufferIntervalSupplier)
        : "If bufferAsyncIoProcessor is true, then the bufferIntervalSupplier needs to be non null";
    ThreadContext threadContext = threadPool.getThreadContext();
    // Shared write logic: sync every candidate location in the batch in one call.
    CheckedConsumer<List<Tuple<Translog.Location, Consumer<Exception>>>, IOException> writeConsumer = candidates -> {
        try {
            engineSupplier.get().translogManager().ensureTranslogSynced(candidates.stream().map(Tuple::v1));
        } catch (AlreadyClosedException ex) {
            // that's fine since we already synced everything on engine close - this also is conform with the methods
            // documentation
        } catch (IOException ex) { // if this fails we are in deep shit - fail the request
            logger.debug("failed to sync translog", ex);
            throw ex;
        }
    };
    if (bufferAsyncIoProcessor) {
        return new BufferedAsyncIOProcessor<>(logger, 102400, threadContext, threadPool, bufferIntervalSupplier) {
            @Override
            protected void write(List<Tuple<Translog.Location, Consumer<Exception>>> candidates) throws IOException {
                writeConsumer.accept(candidates);
            }
            @Override
            protected String getBufferProcessThreadPoolName() {
                return ThreadPool.Names.TRANSLOG_SYNC;
            }
        };
    }
    return new AsyncIOProcessor<>(logger, 1024, threadContext) {
        @Override
        protected void write(List<Tuple<Translog.Location, Consumer<Exception>>> candidates) throws IOException {
            writeConsumer.accept(candidates);
        }
    };
}
/**
 * Syncs the given location with the underlying storage unless already synced. This method might return immediately without
 * actually fsyncing the location until the sync listener is called. Yet, unless there is already another thread fsyncing
 * the transaction log the caller thread will be hijacked to run the fsync for all pending fsync operations.
 * This method allows indexing threads to continue indexing without blocking on fsync calls. We ensure that there is only
 * one thread blocking on the sync and all others can continue indexing.
 * NOTE: if the syncListener throws an exception when it's processed the exception will only be logged. Users should make sure that the
 * listener handles all exception cases internally.
 */
public final void sync(Translog.Location location, Consumer<Exception> syncListener) {
    verifyNotClosed();
    translogSyncProcessor.put(location, syncListener);
}
/**
 * Synchronously fsyncs the translog through the engine's translog manager.
 *
 * @throws IOException if the translog sync fails
 */
public void sync() throws IOException {
    verifyNotClosed();
    final Engine engine = getEngine();
    engine.translogManager().syncTranslog();
}
/**
 * Checks if the underlying storage sync is required.
 *
 * @return true when the translog reports an outstanding sync is needed
 */
public boolean isSyncNeeded() {
    final Engine engine = getEngine();
    return engine.translogManager().isTranslogSyncNeeded();
}
/**
 * Returns the current translog durability mode, as configured on the index settings.
 */
public Durability getTranslogDurability() {
    return indexSettings.getTranslogDurability();
}
// we can not protect with a lock since we "release" on a different thread
private final AtomicBoolean flushOrRollRunning = new AtomicBoolean();
/**
 * Schedules a flush or translog generation roll if needed but will not schedule more than one concurrently. The operation will be
 * executed asynchronously on the flush thread pool.
 * Can also schedule a flush if decided by translog manager
 */
public void afterWriteOperation() {
    if (shouldPeriodicallyFlush() || shouldRollTranslogGeneration()) {
        // CAS guard: only one async flush-or-roll may be in flight at a time.
        if (flushOrRollRunning.compareAndSet(false, true)) {
            /*
             * We have to check again since otherwise there is a race when a thread passes the first check next to another thread which
             * performs the operation quickly enough to finish before the current thread could flip the flag. In that situation, we
             * have an extra operation.
             *
             * Additionally, a flush implicitly executes a translog generation roll so if we execute a flush then we do not need to
             * check if we should roll the translog generation.
             */
            if (shouldPeriodicallyFlush()) {
                logger.debug("submitting async flush request");
                final AbstractRunnable flush = new AbstractRunnable() {
                    @Override
                    public void onFailure(final Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to flush index", e);
                        }
                    }
                    @Override
                    protected void doRun() throws IOException {
                        flush(new FlushRequest());
                        periodicFlushMetric.inc();
                    }
                    @Override
                    public void onAfter() {
                        // Clear the guard then re-check: more work may have accumulated meanwhile.
                        flushOrRollRunning.compareAndSet(true, false);
                        afterWriteOperation();
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(flush);
            } else if (shouldRollTranslogGeneration()) {
                logger.debug("submitting async roll translog generation request");
                final AbstractRunnable roll = new AbstractRunnable() {
                    @Override
                    public void onFailure(final Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to roll translog generation", e);
                        }
                    }
                    @Override
                    protected void doRun() throws Exception {
                        rollTranslogGeneration();
                    }
                    @Override
                    public void onAfter() {
                        flushOrRollRunning.compareAndSet(true, false);
                        afterWriteOperation();
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(roll);
            } else {
                // Neither condition holds anymore (another thread did the work) — release the guard.
                flushOrRollRunning.compareAndSet(true, false);
            }
        }
    }
}
/**
 * Build {@linkplain RefreshListeners} for this shard. Listener capacity comes from the index settings;
 * exceeding it forces a refresh with source "too_many_listeners".
 */
private RefreshListeners buildRefreshListeners() {
    return new RefreshListeners(
        indexSettings::getMaxRefreshListeners,
        () -> refresh("too_many_listeners"),
        logger,
        threadPool.getThreadContext(),
        externalRefreshMetric
    );
}
/**
 * Simple struct encapsulating a shard failure
 *
 * @see IndexShard#addShardFailureCallback(Consumer)
 *
 * @opensearch.api
 */
@PublicApi(since = "1.0.0")
public static final class ShardFailure {
    // Routing entry of the failed shard.
    public final ShardRouting routing;
    // Human-readable reason for the failure.
    public final String reason;
    // Underlying exception, if any.
    @Nullable
    public final Exception cause;
    public ShardFailure(ShardRouting routing, String reason, @Nullable Exception cause) {
        this.routing = routing;
        this.reason = reason;
        this.cause = cause;
    }
}
// Package-private accessor; exposes the engine factory (used by tests).
EngineFactory getEngineFactory() {
    return engineFactory;
}
// Package-private accessor; exposes the engine config factory (used by tests).
EngineConfigFactory getEngineConfigFactory() {
    return engineConfigFactory;
}
// for tests
ReplicationTracker getReplicationTracker() {
    return replicationTracker;
}
/**
 * Executes a scheduled refresh if necessary.
 *
 * @return <code>true</code> iff the engine got refreshed otherwise <code>false</code>
 */
public boolean scheduledRefresh() {
    verifyNotClosed();
    boolean listenerNeedsRefresh = refreshListeners.refreshNeeded();
    if (isReadAllowed() && (listenerNeedsRefresh || getEngine().refreshNeeded())) {
        if (listenerNeedsRefresh == false // if we have a listener that is waiting for a refresh we need to force it
            && isSearchIdleSupported()
            && isSearchIdle()
            && indexSettings.isExplicitRefresh() == false
            && active.get()) { // it must be active otherwise we might not free up segment memory once the shard became inactive
            // lets skip this refresh since we are search idle and
            // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will
            // cause the next schedule to refresh.
            final Engine engine = getEngine();
            engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some
            // Remember the current translog location so a later searcher can wait for it (see setRefreshPending).
            setRefreshPending(engine);
            return false;
        } else {
            if (logger.isTraceEnabled()) {
                logger.trace("refresh with source [schedule]");
            }
            return getEngine().maybeRefresh("schedule");
        }
    }
    // No refresh needed or reads not allowed yet; still take the chance to prune deletes.
    final Engine engine = getEngine();
    engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some
    return false;
}
/**
 * Returns true if this shard is search idle, i.e. no searcher access has happened within the
 * configured search-idle-after interval.
 */
public final boolean isSearchIdle() {
    final long millisSinceLastSearch = threadPool.relativeTimeInMillis() - lastSearcherAccess.get();
    return millisSinceLastSearch >= indexSettings.getSearchIdleAfter().getMillis();
}
/**
 * Returns true if this shard supports search idle.
 * <p>
 * Remote-store-backed shards never support it, so the async refresh task keeps uploading to the remote
 * store periodically. Indices using Segment Replication only support it when there are no replicas:
 * primary shards push out new segments only after a refresh, so we don't want to wait for a search to
 * trigger that cycle; replicas will only refresh after receiving a new set of segments.
 */
public final boolean isSearchIdleSupported() {
    if (isRemoteTranslogEnabled() || indexSettings.isAssignedOnRemoteNode()) {
        return false;
    }
    if (indexSettings.isSegRepEnabledOrRemoteNode() == false) {
        return true;
    }
    return indexSettings.getNumberOfReplicas() == 0;
}
/**
 * Returns the last timestamp the searcher was accessed. This is a relative timestamp in milliseconds
 * (same clock as {@code threadPool.relativeTimeInMillis()}).
 */
final long getLastSearcherAccess() {
    return lastSearcherAccess.get();
}
/**
 * Returns true if this shard has some scheduled refresh that is pending because of search-idle
 * (i.e. a translog location was recorded by {@code setRefreshPending} and not yet refreshed away).
 */
public final boolean hasRefreshPending() {
    return pendingRefreshLocation.get() != null;
}
/**
 * Records the translog's latest write location as the pending-refresh marker, only ever advancing it
 * forward (an older pending location is never replaced by an earlier one).
 */
private void setRefreshPending(Engine engine) {
    final Translog.Location latestWrite = engine.translogManager().getTranslogLastWriteLocation();
    pendingRefreshLocation.updateAndGet(current -> {
        final boolean advance = current == null || current.compareTo(latestWrite) <= 0;
        return advance ? latestWrite : current;
    });
}
/**
 * Refresh listener that clears {@code pendingRefreshLocation} once a refresh has made the recorded
 * translog location visible: it snapshots the last write location before the refresh and, after a
 * successful refresh, nulls out any pending location at or before that snapshot.
 */
private class RefreshPendingLocationListener implements ReferenceManager.RefreshListener {
    // Translog write location captured just before the refresh started; null if the shard was closed.
    Translog.Location lastWriteLocation;
    @Override
    public void beforeRefresh() {
        try {
            lastWriteLocation = getEngine().translogManager().getTranslogLastWriteLocation();
        } catch (AlreadyClosedException exc) {
            // shard is closed - no location is fine
            lastWriteLocation = null;
        }
    }
    @Override
    public void afterRefresh(boolean didRefresh) {
        if (didRefresh && lastWriteLocation != null) {
            pendingRefreshLocation.updateAndGet(pendingLocation -> {
                // Clear the pending location only if this refresh covered it.
                if (pendingLocation == null || pendingLocation.compareTo(lastWriteLocation) <= 0) {
                    return null;
                } else {
                    return pendingLocation;
                }
            });
        }
    }
}
/**
 * Registers the given listener and invokes it once the shard is active again and all
 * pending refresh translog location has been refreshed. If there is no pending refresh location registered the listener will be
 * invoked immediately.
 * @param listener the listener to invoke once the pending refresh location is visible. The listener will be called with
 *                 <code>true</code> if the listener was registered to wait for a refresh.
 */
public final void awaitShardSearchActive(Consumer<Boolean> listener) {
    boolean isSearchIdle = isSearchIdle();
    markSearcherAccessed(); // move the shard into non-search idle
    final Translog.Location location = pendingRefreshLocation.get();
    if (location != null) {
        if (isSearchIdle) {
            // Notify search operation listeners that an idle shard is being reactivated by a search.
            SearchOperationListener searchOperationListener = getSearchOperationListener();
            searchOperationListener.onSearchIdleReactivation();
        }
        addRefreshListener(location, (b) -> {
            // Only clear the pending location if it is still the one we waited for.
            pendingRefreshLocation.compareAndSet(location, null);
            listener.accept(true);
        });
    } else {
        listener.accept(false);
    }
}
/**
 * Add a listener for refreshes.
 *
 * @param location the location to listen for
 * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with
 *        false otherwise.
 */
public void addRefreshListener(Translog.Location location, Consumer<Boolean> listener) {
    final boolean readAllowed;
    if (isReadAllowed()) {
        readAllowed = true;
    } else {
        // check again under postRecoveryMutex. this is important to create a happens before relationship
        // between the switch to POST_RECOVERY + associated refresh. Otherwise we may respond
        // to a listener before a refresh actually happened that contained that operation.
        synchronized (postRecoveryMutex) {
            readAllowed = isReadAllowed();
        }
    }
    // NRT Replicas will not accept refresh listeners.
    if (readAllowed && isSegmentReplicationAllowed() == false) {
        refreshListeners.addOrNotify(location, listener);
    } else {
        // we're not yet ready for reads, just ignore refresh cycles
        listener.accept(false);
    }
}
/**
 * Metrics updater for a refresh: records the wall-clock duration of each refresh into the supplied
 * {@link MeanMetric}. With assertions enabled it also verifies that beforeRefresh/afterRefresh are
 * paired and called from the same thread.
 *
 * @opensearch.internal
 */
private static class RefreshMetricUpdater implements ReferenceManager.RefreshListener {
    private final MeanMetric refreshMetric;
    // Start time (System.nanoTime) of the in-flight refresh.
    private long currentRefreshStartTime;
    // Thread that called beforeRefresh; used only for assertion checking.
    private Thread callingThread = null;
    private RefreshMetricUpdater(MeanMetric refreshMetric) {
        this.refreshMetric = refreshMetric;
    }
    @Override
    public void beforeRefresh() throws IOException {
        if (Assertions.ENABLED) {
            assert callingThread == null : "beforeRefresh was called by "
                + callingThread.getName()
                + " without a corresponding call to afterRefresh";
            callingThread = Thread.currentThread();
        }
        currentRefreshStartTime = System.nanoTime();
    }
    @Override
    public void afterRefresh(boolean didRefresh) throws IOException {
        if (Assertions.ENABLED) {
            assert callingThread != null : "afterRefresh called but not beforeRefresh";
            assert callingThread == Thread.currentThread() : "beforeRefreshed called by a different thread. current ["
                + Thread.currentThread().getName()
                + "], thread that called beforeRefresh ["
                + callingThread.getName()
                + "]";
            callingThread = null;
        }
        refreshMetric.inc(System.nanoTime() - currentRefreshStartTime);
    }
}
/**
 * Refresh listener to update the Shard's ReplicationCheckpoint post refresh.
 */
private class ReplicationCheckpointUpdater implements ReferenceManager.RefreshListener {
    @Override
    public void beforeRefresh() throws IOException {}
    @Override
    public void afterRefresh(boolean didRefresh) throws IOException {
        // Only act on refreshes that actually changed the reader.
        if (didRefresh) {
            // We're only starting to track the replication checkpoint. The timers for replication are started when
            // the checkpoint is published. This is done so that the timers do not include the time spent by primary
            // in uploading the segments to remote store.
            updateReplicationCheckpoint();
        }
    }
}
/**
 * Computes the latest segment-infos/checkpoint pair and publishes the checkpoint to the replication
 * tracker, releasing the gated SegmentInfos snapshot via try-with-resources.
 */
private void updateReplicationCheckpoint() {
    final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> tuple = getLatestSegmentInfosAndCheckpoint();
    try (final GatedCloseable<SegmentInfos> ignored = tuple.v1()) {
        replicationTracker.setLatestReplicationCheckpoint(tuple.v2());
    } catch (IOException e) {
        throw new OpenSearchException("Error Closing SegmentInfos Snapshot", e);
    }
}
/**
 * Builds the supplier of tombstone documents the engine uses for deletes and no-ops. Delete tombstones
 * are created via the live document mapper; no-op tombstones use a pre-built "__noop" mapper.
 * NOTE(review): when mapperService is null, noopDocumentMapper is null and newNoopTombstoneDoc would
 * NPE — presumably callers never hit that path without a mapper service; confirm.
 */
private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() {
    final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("__noop");
    final DocumentMapper noopDocumentMapper = mapperService != null
        ? new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService)
        : null;
    return new EngineConfig.TombstoneDocSupplier() {
        @Override
        public ParsedDocument newDeleteTombstoneDoc(String id) {
            return docMapper().getDocumentMapper().createDeleteTombstoneDoc(shardId.getIndexName(), id);
        }
        @Override
        public ParsedDocument newNoopTombstoneDoc(String reason) {
            return noopDocumentMapper.createNoopTombstoneDoc(shardId.getIndexName(), reason);
        }
    };
}
/**
 * Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint.
 * Requires all operation permits to be blocked and must NOT be called under {@code mutex}. The swap is a
 * three-step dance: install a temporary read-only engine, build the new read-write engine (optionally
 * syncing segments/translog from the remote store first), then recover from translog and install it.
 */
void resetEngineToGlobalCheckpoint() throws IOException {
    assert Thread.holdsLock(mutex) == false : "resetting engine under mutex";
    assert getActiveOperationsCount() == OPERATIONS_BLOCKED : "resetting engine without blocking operations; active operations are ["
        + getActiveOperations()
        + ']';
    sync(); // persist the global checkpoint to disk
    final SeqNoStats seqNoStats = seqNoStats();
    final TranslogStats translogStats = translogStats();
    // flush to make sure the latest commit, which will be opened by the read-only engine, includes all operations.
    flush(new FlushRequest().waitIfOngoing(true));
    SetOnce<Engine> newEngineReference = new SetOnce<>();
    final long globalCheckpoint = getLastKnownGlobalCheckpoint();
    assert globalCheckpoint == getLastSyncedGlobalCheckpoint();
    synchronized (engineMutex) {
        verifyNotClosed();
        // we must create both new read-only engine and new read-write engine under engineMutex to ensure snapshotStoreMetadata,
        // acquireXXXCommit and close works.
        final Engine readOnlyEngine = new ReadOnlyEngine(
            newEngineConfig(replicationTracker),
            seqNoStats,
            translogStats,
            false,
            Function.identity(),
            true
        ) {
            // Commit/snapshot acquisition is forwarded to the new engine once it exists; before that,
            // the engine counts as closed.
            @Override
            public GatedCloseable<IndexCommit> acquireLastIndexCommit(boolean flushFirst) {
                synchronized (engineMutex) {
                    if (newEngineReference.get() == null) {
                        throw new AlreadyClosedException("engine was closed");
                    }
                    // ignore flushFirst since we flushed above and we do not want to interfere with ongoing translog replay
                    return newEngineReference.get().acquireLastIndexCommit(false);
                }
            }
            @Override
            public GatedCloseable<IndexCommit> acquireSafeIndexCommit() {
                synchronized (engineMutex) {
                    if (newEngineReference.get() == null) {
                        throw new AlreadyClosedException("engine was closed");
                    }
                    return newEngineReference.get().acquireSafeIndexCommit();
                }
            }
            @Override
            public GatedCloseable<SegmentInfos> getSegmentInfosSnapshot() {
                synchronized (engineMutex) {
                    if (newEngineReference.get() == null) {
                        throw new AlreadyClosedException("engine was closed");
                    }
                    return newEngineReference.get().getSegmentInfosSnapshot();
                }
            }
            @Override
            public void close() throws IOException {
                assert Thread.holdsLock(engineMutex);
                Engine newEngine = newEngineReference.get();
                if (newEngine == currentEngineReference.get()) {
                    // we successfully installed the new engine so do not close it.
                    newEngine = null;
                }
                IOUtils.close(super::close, newEngine);
            }
        };
        // Swap in the read-only engine and close the old engine it replaces.
        IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine));
        if (indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded()) {
            syncSegmentsFromRemoteSegmentStore(false);
        }
        if ((indexSettings.isRemoteTranslogStoreEnabled() || this.isRemoteSeeded()) && shardRouting.primary()) {
            syncRemoteTranslogAndUpdateGlobalCheckpoint();
        }
        newEngineReference.set(engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker)));
        onNewEngine(newEngineReference.get());
    }
    final TranslogRecoveryRunner translogRunner = (snapshot) -> runTranslogRecovery(
        newEngineReference.get(),
        snapshot,
        Engine.Operation.Origin.LOCAL_RESET,
        () -> {
            // TODO: add a dedicate recovery stats for the reset translog
        }
    );
    // When the new engine is created, translogs are synced from remote store onto local. Since remote store is the source
    // of truth for translog, we play all translogs that exists locally. Otherwise, the recoverUpto happens upto global checkpoint.
    // We also replay all local translog ops with Segment replication, because on engine swap our local translog may
    // hold more ops than the global checkpoint.
    long recoverUpto = this.isRemoteTranslogEnabled() || indexSettings().isSegRepEnabledOrRemoteNode()
        ? Long.MAX_VALUE
        : globalCheckpoint;
    newEngineReference.get()
        .translogManager()
        .recoverFromTranslog(translogRunner, newEngineReference.get().getProcessedLocalCheckpoint(), recoverUpto);
    newEngineReference.get().refresh("reset_engine");
    synchronized (engineMutex) {
        verifyNotClosed();
        // Install the recovered read-write engine and close the temporary read-only engine.
        IOUtils.close(currentEngineReference.getAndSet(newEngineReference.get()));
        // We set active because we are now writing operations to the engine; this way,
        // if we go idle after some time and become inactive, we still give sync'd flush a chance to run.
        active.set(true);
    }
    // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during
    // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine.
    onSettingsChanged();
}
/**
 * Downloads translog files from the remote translog store onto the local store and then
 * reloads the global checkpoint into the replication tracker, so that subsequent steps
 * operate on the remote store's view of replication progress.
 */
private void syncRemoteTranslogAndUpdateGlobalCheckpoint() throws IOException {
    syncTranslogFilesFromRemoteTranslog();
    loadGlobalCheckpointToReplicationTracker();
}
/**
 * Deletes this shard's translog files from the remote translog store.
 * Used when remote translog contents must be discarded (see {@link #deleteRemoteStoreContents()}).
 *
 * @throws IOException if cleanup against the remote repository fails
 */
public void deleteTranslogFilesFromRemoteTranslog() throws IOException {
    TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting);
    // The cast below is only valid when the shard is wired up with a remote-backed translog factory.
    assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory;
    Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository();
    RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathStrategy(), remoteStoreSettings);
}
/**
 * Cleans up remote store and remote translog contents.
 * This is used in remote store migration, where we want to clean up all stale segment and translog data
 * and seed the remote store afresh.
 */
public void deleteRemoteStoreContents() throws IOException {
    deleteTranslogFilesFromRemoteTranslog();
    // Passing 0 so that no stale segments are retained. NOTE(review): confirm the exact
    // semantics of deleteStaleSegments' argument against its declaration.
    getRemoteDirectory().deleteStaleSegments(0);
}
/**
 * Downloads this shard's translog files from the remote translog store into the local
 * translog directory.
 *
 * @throws IOException if the download from the remote repository fails
 */
public void syncTranslogFilesFromRemoteTranslog() throws IOException {
    TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting);
    // The cast below is only valid when the shard is wired up with a remote-backed translog factory.
    assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory;
    Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository();
    RemoteFsTranslog.download(
        repository,
        shardId,
        getThreadPool(),
        shardPath().resolveTranslog(),
        indexSettings.getRemoteStorePathStrategy(),
        remoteStoreSettings,
        logger,
        shouldSeedRemoteStore()
    );
}
/**
 * Downloads segments from remote segment store.
 * @param overrideLocal flag to override local segment files with those in remote store.
 * @throws IOException if exception occurs while reading segments from remote store.
 */
public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOException {
    // Delegates to the two-argument overload with a no-op per-file callback.
    syncSegmentsFromRemoteSegmentStore(overrideLocal, () -> {});
}
/**
 * Downloads segments from remote segment store along with updating the access time of the recovery target.
 * @param overrideLocal flag to override local segment files with those in remote store.
 * @param onFileSync runnable that updates the access time when run.
 * @throws IOException if exception occurs while reading segments from remote store.
 */
public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException {
    boolean syncSegmentSuccess = false;
    long startTimeMs = System.currentTimeMillis();
    // Only valid for remote-store-enabled indices or shards being seeded during migration.
    assert indexSettings.isRemoteStoreEnabled() || this.isRemoteSeeded();
    logger.trace("Downloading segments from remote segment store");
    RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory();
    // We need to call RemoteSegmentStoreDirectory.init() in order to get latest metadata of the files that
    // are uploaded to the remote segment store.
    RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.init();
    // Exclude segments_N entries: the commit point is rebuilt below from the metadata's
    // SegmentInfos bytes rather than copied as a file.
    Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = remoteDirectory
        .getSegmentsUploadedToRemoteStore()
        .entrySet()
        .stream()
        .filter(entry -> entry.getKey().startsWith(IndexFileNames.SEGMENTS) == false)
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    // Hold references for the duration of the copy; released in the finally block.
    store.incRef();
    remoteStore.incRef();
    try {
        final Directory storeDirectory;
        if (recoveryState.getStage() == RecoveryState.Stage.INDEX) {
            // During the INDEX stage of recovery, wrap the directory so per-file download
            // progress is reported into the recovery stats.
            storeDirectory = new StoreRecovery.StatsDirectoryWrapper(store.directory(), recoveryState.getIndex());
            for (String file : uploadedSegments.keySet()) {
                long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum());
                if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) {
                    // File will be downloaded; the 'false' flag records it as not reused.
                    recoveryState.getIndex().addFileDetail(file, uploadedSegments.get(file).getLength(), false);
                } else {
                    // Local copy matches the remote checksum; the 'true' flag records it as reused.
                    recoveryState.getIndex().addFileDetail(file, uploadedSegments.get(file).getLength(), true);
                }
            }
        } else {
            storeDirectory = store.directory();
        }
        copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal, onFileSync);
        if (remoteSegmentMetadata != null) {
            // Rebuild the commit point from the SegmentInfos bytes stored in the remote metadata.
            final SegmentInfos infosSnapshot = store.buildSegmentInfos(
                remoteSegmentMetadata.getSegmentInfosBytes(),
                remoteSegmentMetadata.getGeneration()
            );
            long processedLocalCheckpoint = Long.parseLong(infosSnapshot.getUserData().get(LOCAL_CHECKPOINT_KEY));
            // delete any other commits, we want to start the engine only from a new commit made with the downloaded infos bytes.
            // Extra segments will be wiped on engine open.
            for (String file : List.of(store.directory().listAll())) {
                if (file.startsWith(IndexFileNames.SEGMENTS)) {
                    store.deleteQuiet(file);
                }
            }
            assert Arrays.stream(store.directory().listAll()).filter(f -> f.startsWith(IndexFileNames.SEGMENTS)).findAny().isEmpty()
                : "There should not be any segments file in the dir";
            store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint);
        }
        syncSegmentSuccess = true;
    } catch (IOException e) {
        throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e);
    } finally {
        logger.trace(
            "syncSegmentsFromRemoteSegmentStore success={} elapsedTime={}",
            syncSegmentSuccess,
            (System.currentTimeMillis() - startTimeMs)
        );
        store.decRef();
        remoteStore.decRef();
    }
}
/**
 * Downloads segments from given remote segment store for a specific commit.
 * @param overrideLocal flag to override local segment files with those in remote store
 * @param sourceRemoteDirectory RemoteSegmentDirectory Instance from which we need to sync segments
 * @param primaryTerm Primary Term for shard at the time of commit operation for which we are syncing segments
 *                    (NOTE(review): currently unused inside this method)
 * @param commitGeneration commit generation at the time of commit operation for which we are syncing segments
 * @throws IOException if exception occurs while reading segments from remote store
 */
public void syncSegmentsFromGivenRemoteSegmentStore(
    boolean overrideLocal,
    RemoteSegmentStoreDirectory sourceRemoteDirectory,
    long primaryTerm,
    long commitGeneration
) throws IOException {
    logger.trace("Downloading segments from given remote segment store");
    RemoteSegmentStoreDirectory remoteDirectory = null;
    // This shard's own remote store may be absent — NOTE(review): presumably when the shard
    // itself is not remote-store-enabled; confirm with callers. Only acquire it when present.
    if (remoteStore != null) {
        remoteDirectory = getRemoteDirectory();
        remoteDirectory.init();
        remoteStore.incRef();
    }
    Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments = sourceRemoteDirectory
        .getSegmentsUploadedToRemoteStore();
    final Directory storeDirectory = store.directory();
    store.incRef();
    try {
        // Copy from the source remote directory; when this shard has its own remote store,
        // it is passed as the target directory so files are also uploaded there.
        String segmentsNFile = copySegmentFiles(
            storeDirectory,
            sourceRemoteDirectory,
            remoteDirectory,
            uploadedSegments,
            overrideLocal,
            () -> {}
        );
        if (segmentsNFile != null) {
            try (
                ChecksumIndexInput indexInput = new BufferedChecksumIndexInput(
                    storeDirectory.openInput(segmentsNFile, IOContext.DEFAULT)
                )
            ) {
                // Read the commit point at the requested generation from the downloaded segments_N file.
                SegmentInfos infosSnapshot = SegmentInfos.readCommit(store.directory(), indexInput, commitGeneration);
                long processedLocalCheckpoint = Long.parseLong(infosSnapshot.getUserData().get(LOCAL_CHECKPOINT_KEY));
                if (remoteStore != null) {
                    store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint);
                } else {
                    // No remote store to commit against; just fsync the copied files and directory metadata.
                    store.directory().sync(infosSnapshot.files(true));
                    store.directory().syncMetaData();
                }
            }
        }
    } catch (IOException e) {
        throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e);
    } finally {
        store.decRef();
        if (remoteStore != null) {
            remoteStore.decRef();
        }
    }
}
/**
 * Copies the given uploaded segment files from a remote directory into the local store,
 * skipping files whose local copy already matches the remote checksum.
 *
 * @param storeDirectory local directory to copy into
 * @param sourceRemoteDirectory remote directory to copy from
 * @param targetRemoteDirectory optional remote directory files are also uploaded to (may be null)
 * @param uploadedSegments file name to uploaded-metadata map describing the remote contents
 * @param overrideLocal when true, the local directory is wiped first and everything is downloaded
 * @param onFileSync callback invoked as files are synced
 * @return the name of the segments_N file among the uploaded segments, or null if none was present
 * @throws IOException if the download fails
 */
private String copySegmentFiles(
    Directory storeDirectory,
    RemoteSegmentStoreDirectory sourceRemoteDirectory,
    RemoteSegmentStoreDirectory targetRemoteDirectory,
    Map<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> uploadedSegments,
    boolean overrideLocal,
    final Runnable onFileSync
) throws IOException {
    final Set<String> pendingDownload = new HashSet<>();
    final Set<String> alreadyPresent = new HashSet<>();
    String latestSegmentInfosFile = null;
    try {
        // When overriding, wipe the local directory first so only remote content remains.
        if (overrideLocal) {
            for (String localFile : storeDirectory.listAll()) {
                storeDirectory.deleteFile(localFile);
            }
        }
        for (Map.Entry<String, RemoteSegmentStoreDirectory.UploadedSegmentMetadata> entry : uploadedSegments.entrySet()) {
            final String remoteFile = entry.getKey();
            final long expectedChecksum = Long.parseLong(entry.getValue().getChecksum());
            if (overrideLocal || localDirectoryContains(storeDirectory, remoteFile, expectedChecksum) == false) {
                pendingDownload.add(remoteFile);
            } else {
                alreadyPresent.add(remoteFile);
            }
            // Remember the segments_N file so the caller can rebuild the commit point from it.
            if (remoteFile.startsWith(IndexFileNames.SEGMENTS)) {
                assert latestSegmentInfosFile == null : "There should be only one SegmentInfosSnapshot file";
                latestSegmentInfosFile = remoteFile;
            }
        }
        if (pendingDownload.isEmpty() == false) {
            try {
                fileDownloader.download(sourceRemoteDirectory, storeDirectory, targetRemoteDirectory, pendingDownload, onFileSync);
            } catch (Exception e) {
                throw new IOException("Error occurred when downloading segments from remote store", e);
            }
        }
    } finally {
        logger.trace("Downloaded segments here: {}", pendingDownload);
        logger.trace("Skipped download for segments here: {}", alreadyPresent);
    }
    return latestSegmentInfosFile;
}
// Visible for testing
/**
 * Returns true when {@code file} exists locally with the expected checksum.
 * On a checksum mismatch the local copy is deleted when reads are not allowed,
 * otherwise the shard is failed; missing or unreadable files simply report false
 * so they get (re-)downloaded.
 */
boolean localDirectoryContains(Directory localDirectory, String file, long checksum) throws IOException {
    try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) {
        if (CodecUtil.retrieveChecksum(indexInput) == checksum) {
            return true;
        }
        logger.warn("Checksum mismatch between local and remote segment file: {}, will override local file", file);
        // If there is a checksum mismatch and we are not serving reads it is safe to go ahead and delete the file now.
        // Outside of engine resets this method will be invoked during recovery so this is safe.
        if (isReadAllowed()) {
            // segment conflict with remote store while the shard is serving reads.
            failShard("Local copy of segment " + file + " has a different checksum than the version in remote store", null);
        } else {
            localDirectory.deleteFile(file);
        }
    } catch (NoSuchFileException | FileNotFoundException e) {
        logger.debug("File {} does not exist in local FS, downloading from remote store", file);
    } catch (IOException e) {
        logger.warn("Exception while reading checksum of file: {}, this can happen if file is corrupted", file);
        // For any other exception on reading checksum, we delete the file to re-download again
        localDirectory.deleteFile(file);
    }
    return false;
}
/**
 * Returns the maximum sequence number of either update or delete operations that have been processed in this
 * shard, or the sequence number from {@link #advanceMaxSeqNoOfUpdatesOrDeletes(long)}. An index request is
 * considered as an update operation if it overwrites the existing documents in Lucene index with the same
 * document id.
 * <p>
 * The primary captures this value after it executes a replication request, then transfers it to a replica
 * before executing that replication request on a replica.
 */
public long getMaxSeqNoOfUpdatesOrDeletes() {
    return getEngine().getMaxSeqNoOfUpdatesOrDeletes();
}
/**
 * A replica calls this method to advance the max_seq_no_of_updates marker of its engine to at least the max_seq_no_of_updates
 * value (piggybacked in a replication request) that it receives from its primary before executing that replication request.
 * The receiving value is at least as high as the max_seq_no_of_updates on the primary was when any of the operations of that
 * replication request were processed on it.
 * <p>
 * A replica shard also calls this method to bootstrap the max_seq_no_of_updates marker with the value that it received from
 * the primary in peer-recovery, before it replays remote translog operations from the primary. The receiving value is at least
 * as high as the max_seq_no_of_updates on the primary was when any of these operations were processed on it.
 * <p>
 * These transfers guarantee that every index/delete operation when executing on a replica engine will observe, for this marker,
 * a value which is at least the value of the max_seq_no_of_updates marker on the primary after that operation was executed on
 * the primary.
 *
 * @see #acquireReplicaOperationPermit(long, long, long, ActionListener, String, Object)
 * @see RecoveryTarget#indexTranslogOperations(List, int, long, long, RetentionLeases, long, ActionListener)
 */
public void advanceMaxSeqNoOfUpdatesOrDeletes(long seqNo) {
    getEngine().advanceMaxSeqNoOfUpdatesOrDeletes(seqNo);
}
/**
 * Performs the pre-closing checks on the {@link IndexShard}.
 *
 * @throws IllegalStateException if the sanity checks failed
 */
public void verifyShardBeforeIndexClosing() throws IllegalStateException {
    // Delegates the actual verification to the current engine.
    getEngine().verifyEngineBeforeIndexClosing();
}
/** Package-private accessor for the {@link RetentionLeaseSyncer} used by this shard. */
RetentionLeaseSyncer getRetentionLeaseSyncer() {
    return retentionLeaseSyncer;
}
/**
 * Fetch the latest SegmentInfos held by the shard's underlying Engine, wrapped
 * by a {@link GatedCloseable} to ensure files are not deleted/merged away.
 *
 * @throws EngineException - When segment infos cannot be safely retrieved
 */
public GatedCloseable<SegmentInfos> getSegmentInfosSnapshot() {
    return getEngine().getSegmentInfosSnapshot();
}
/**
 * Resolves the buffer interval used for remote translog uploads: an explicit
 * index-level setting wins, otherwise the cluster-level supplier provides the value.
 */
private TimeValue getRemoteTranslogUploadBufferInterval(Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier) {
    assert Objects.nonNull(clusterRemoteTranslogBufferIntervalSupplier) : "remote translog buffer interval supplier is null";
    return indexSettings().isRemoteTranslogBufferIntervalExplicit()
        ? indexSettings().getRemoteTranslogUploadBufferInterval()
        : clusterRemoteTranslogBufferIntervalSupplier.get();
}
/**
 * Returns the async processor that handles translog sync requests for this shard.
 * Exclusively for testing, please do not use it elsewhere.
 */
public AsyncIOProcessor<Translog.Location> getTranslogSyncProcessor() {
    return translogSyncProcessor;
}
/**
 * Where this shard stands with respect to remote-store migration.
 * Derived by {@link #getShardMigrationState(IndexSettings, boolean)} from the index settings
 * and whether the shard still needs to seed the remote store.
 */
enum ShardMigrationState {
    // Remote-store-enabled index assigned on a remote node: no migration in progress.
    REMOTE_NON_MIGRATING,
    // Migrating to remote store; seeding already done (shouldSeed == false).
    REMOTE_MIGRATING_SEEDED,
    // Migrating to remote store; remote store not yet seeded (shouldSeed == true).
    REMOTE_MIGRATING_UNSEEDED,
    // Document-replication shard not participating in migration.
    DOCREP_NON_MIGRATING
}
/**
 * Derives the shard's migration state from its index settings and whether the
 * remote store still needs to be seeded.
 */
static ShardMigrationState getShardMigrationState(IndexSettings indexSettings, boolean shouldSeed) {
    // Not assigned on a remote node at all: plain document replication, no migration.
    if (indexSettings.isAssignedOnRemoteNode() == false) {
        return ShardMigrationState.DOCREP_NON_MIGRATING;
    }
    // Remote node and remote store both enabled: fully remote, nothing to migrate.
    if (indexSettings.isRemoteStoreEnabled()) {
        return ShardMigrationState.REMOTE_NON_MIGRATING;
    }
    // Remote node but remote store not yet enabled: mid-migration, seeded or not.
    return shouldSeed ? ShardMigrationState.REMOTE_MIGRATING_UNSEEDED : ShardMigrationState.REMOTE_MIGRATING_SEEDED;
}
}
| opensearch-project/OpenSearch | server/src/main/java/org/opensearch/index/shard/IndexShard.java |
1,469 | // FontGenerator8Pixel.java
// ---------------------------
// by Marc Nause
// created: 05.04.2007
//
// $LastChangedDate$
// $LastChangedRevision$
// $LastChangedBy$
//
// based on:
// FontGenerator5Pixel.java
// ---------------------------
// (C) by Michael Peter Christen; [email protected]
// first published on http://www.anomic.de
// Frankfurt, Germany, 2005
// created: 31.10.2005
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package net.yacy.visualization;
public class FontGenerator8Pixel {
//A valid font has 96 characters:
/*
0x20: !"#$%&'
0x28:()*+,-./
0x30:01234567
0x38:89:;<=>?
0x40:@ABCDEFG
0x48:HIJKLMNO
0x50:PQRSTUVW
0x58:XYZ[\]^_
0x60:`abcdefg
0x68:hijklmno
0x70:pqrstuvw
0x78:xyz{|}~
*/
//Each character has the same height of m lines with n pixels each.
//While m does not need to equal n, both have to stay consistent throughout
//the whole font.
public static final String[][] font =
{
{"........", //0x20
"........",
"........",
"........",
"........",
"........",
"........",
"........"},
{"...X....",
"...X....",
"...X....",
"...X....",
"...X....",
"........",
"...X....",
"........"},
{"..X.X...",
"..X.X...",
"........",
"........",
"........",
"........",
"........",
"........"},
{"..X.X...",
"..X.X...",
"XXXXXXX.",
"..X.X...",
"XXXXXXX.",
"..X.X...",
"..X.X...",
"........"},
{"...XX...",
".XXXXXX.",
"X..XX...",
".XXXXX..",
"...XX.X.",
"XXXXXX..",
"...X....",
"........"},
{".XX...X.",
".XX..X..",
"....X...",
"...X....",
"..X.....",
".X...XX.",
"X....XX.",
"........"},
{".XX.....",
"X..X....",
".XX.....",
"..X...X.",
"X..X.X..",
"X...X...",
".XXX..X.",
"........"},
{"...X....",
"...X....",
"........",
"........",
"........",
"........",
"........",
"........"},
{"....X...",
"...X....",
"..X.....",
"..X.....",
"..X.....",
"...X....",
"....X...",
"........"},
{"...X....",
"....X...",
".....X..",
".....X..",
".....X..",
"....X...",
"...X....",
"........"},
{"...X....",
".X.X.X..",
"..XXX...",
"XXXXXXX.",
"..XXX...",
".X..X.X.",
"...X....",
"........"},
{"........",
"...X....",
"...X....",
".XXXXX..",
"...X....",
"...X....",
"........",
"........"},
{"........",
"........",
"........",
"........",
"........",
"...XX...",
"...XX...",
"....X..."},
{"........",
"........",
"........",
".XXXXX..",
"........",
"........",
"........",
"........"},
{"........",
"........",
"........",
"........",
"........",
"...XX...",
"...XX...",
"........"},
{"......X.",
".....X..",
"....X...",
"...X....",
"..X.....",
".X......",
"X.......",
"........"},
{".XXXXX..",
"X....XX.",
"X...X.X.",
"X..X..X.",
"X.X...X.",
"XX....X.",
".XXXXX..",
"........"},
{"...X....",
"..XX....",
".X.X....",
"...X....",
"...X....",
"...X....",
".XXXXX..",
"........"},
{"..XXX...",
".X...X..",
".....X..",
"....X...",
"...X....",
"..X.....",
".XXXXX..",
"........"},
{"..XXX...",
".X...X..",
".....X..",
"...XX...",
".....X..",
".X...X..",
"..XXX...",
"........"},
{".....X..",
"....XX..",
"...X.X..",
"..X..X..",
".XXXXXX.",
".....X..",
".....X..",
"........"},
{".XXXXXX.",
".X......",
".X......",
".XXXXX..",
"......X.",
"......X.",
".XXXXX..",
"........"},
{"..XXX...",
".X...X..",
".X......",
".XXXX...",
".X...X..",
".X...X..",
"..XXX...",
"........"},
{".XXXXX..",
".....X..",
".....X..",
"....X...",
"...X....",
"...X....",
"...X....",
"........"},
{"..XXX...",
".X...X..",
".X...X..",
"..XXX...",
".X...X..",
".X...X..",
"..XXX...",
"........"},
{"..XXX...",
".X...X..",
".X...X..",
"..XXXX..",
".....X..",
".X...X..",
"..XXX...",
"........"},
{"........",
"........",
"...XX...",
"...XX...",
"........",
"...XX...",
"...XX...",
"........"},
{"........",
"........",
"...XX...",
"...XX...",
"........",
"...XX...",
"...XX...",
"....X..."},
{".....XX.",
"...XX...",
".XX.....",
"X.......",
".XX.....",
"...XX...",
".....XX.",
"........"},
{"........",
"........",
"..XXXX..",
"........",
"..XXXX..",
"........",
"........",
"........"},
{"XX......",
"..XX....",
"....XX..",
"......X.",
"....XX..",
"..XX....",
"XX......",
"........",},
{"..XXX...",
".X...X..",
".....X..",
"....X...",
"...X....",
"........",
"...X....",
"........"},
{".XXXXX..",
"X.....X.",
"X...XXX.",
"X..X..X.",
"X...XXX.",
"X.......",
".XXXXXX.",
"........"},
{".XXXXX..",
"X.....X.",
"X.....X.",
"XXXXXXX.",
"X.....X.",
"X.....X.",
"X.....X.",
"........"},
{"XXXXXX..",
"X.....X.",
"X.....X.",
"XXXXXX..",
"X.....X.",
"X.....X.",
"XXXXXX..",
"........"},
{"..XXXX..",
".X....X.",
"X.......",
"X.......",
"X.......",
".X....X.",
"..XXXX..",
"........"},
{"XXXXX...",
"X....X..",
"X.....X.",
"X.....X.",
"X.....X.",
"X....X..",
"XXXXX...",
"........"},
{"XXXXXXX.",
"X.......",
"X.......",
"XXXXXX..",
"X.......",
"X.......",
"XXXXXXX.",
"........"},
{"XXXXXXX.",
"X.......",
"X.......",
"XXXXXX..",
"X.......",
"X.......",
"X.......",
"........"},
{".XXXXX..",
"X.......",
"X.......",
"X..XXXX.",
"X.....X.",
"X.....X.",
".XXXXX..",
"........"},
{"X.....X.",
"X.....X.",
"X.....X.",
"XXXXXXX.",
"X.....X.",
"X.....X.",
"X.....X.",
"........"},
{"..XXX...",
"...X....",
"...X....",
"...X....",
"...X....",
"...X....",
"..XXX...",
"........"},
{"...XXX..",
"....X...",
"....X...",
"....X...",
"....X...",
"X...X...",
".XXX....",
"........"},
{"X....XX.",
"X...XX..",
"X..XX...",
"XXX.....",
"X..XX...",
"X...XX..",
"X....XX.",
"........"},
{"X.......",
"X.......",
"X.......",
"X.......",
"X.......",
"X.......",
"XXXXXXX.",
"........"},
{"X.....X.",
"XX...XX.",
"X.X.X.X.",
"X..X..X.",
"X.....X.",
"X.....X.",
"X.....X.",
"........"},
{"X.....X.",
"XX....X.",
"X.X...X.",
"X..X..X.",
"X...X.X.",
"X....XX.",
"X.....X.",
"........"},
{".XXXXX..",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
".XXXXX..",
"........"},
{"XXXXXX..",
"X.....X.",
"X.....X.",
"XXXXXX..",
"X.......",
"X.......",
"X.......",
"........"},
{".XXXXX..",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
"X...X.X.",
".XXXXX..",
"......X."},
{"XXXXXX..",
"X.....X.",
"X.....X.",
"XXXXXX..",
"X...X...",
"X....X..",
"X.....X.",
"........"},
{".XXXXX..",
"X.....X.",
"X.......",
".XXXXX..",
"......X.",
"X.....X.",
".XXXXX..",
"........"},
{"XXXXXXX.",
"...X....",
"...X....",
"...X....",
"...X....",
"...X....",
"...X....",
"........"},
{"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
".XXXXX..",
"........"},
{"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
".X...X..",
"..X.X...",
"...X....",
"........"},
{"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
"X..X..X.",
"X.X.X.X.",
".X...X..",
"........"},
{"X.....X.",
".X...X..",
"..X.X...",
"...X....",
"..X.X...",
".X...X..",
"X.....X.",
"........"},
{"X.....X.",
".X...X..",
"..X.X...",
"...X....",
"...X....",
"...X....",
"...X....",
"........"},
{"XXXXXXX.",
".....X..",
"....X...",
"...X....",
"..X.....",
".X......",
"XXXXXXX.",
"........"},
{"..XXXX..",
"..X.....",
"..X.....",
"..X.....",
"..X.....",
"..X.....",
"..XXXX..",
"........"},
{"X.......",
".X......",
"..X.....",
"...X....",
"....X...",
".....X..",
"......X.",
"........"},
{"..XXXX..",
".....X..",
".....X..",
".....X..",
".....X..",
".....X..",
"..XXXX..",
"........"},
{"...X....",
"..X.X...",
".X...X..",
"........",
"........",
"........",
"........",
"........"},
{"........",
"........",
"........",
"........",
"........",
"........",
"XXXXXXX.",
"........"},
{"...X....",
"....X...",
"........",
"........",
"........",
"........",
"........",
"........"},
{"........",
"........",
".XXXX...",
".....X..",
".XXXXX..",
"X....X..",
".XXXX.X.",
"........"},
{"X.......",
"X.......",
"X.......",
"XXXXXX..",
"X.....X.",
"X.....X.",
"XXXXXX..",
"........"},
{"........",
"........",
".XXXXXX.",
"X.......",
"X.......",
"X.......",
".XXXXXX.",
"........"},
{"......X.",
"......X.",
"......X.",
".XXXXXX.",
"X.....X.",
"X.....X.",
".XXXXXX.",
"........"},
{"........",
"........",
".XXXXX..",
"X.....X.",
"XXXXXXX.",
"X.......",
".XXXXX..",
"........"},
{"...XX...",
"..X.....",
"..X.....",
".XXX....",
"..X.....",
"..X.....",
"..X.....",
"........"},
{"........",
"........",
".XXXXX..",
"X.....X.",
"X.....X.",
".XXXXXX.",
"......X.",
".XXXXX.."},
{"X.......",
"X.......",
"X.......",
"XXXXXX..",
"X.....X.",
"X.....X.",
"X.....X.",
"........"},
{"........",
"...X....",
"........",
"..XX....",
"...X....",
"...X....",
"..XXX...",
"........"},
{"........",
"....X...",
"........",
"...XX...",
"....X...",
"....X...",
"....X...",
"..XX...."},
{"X.......",
"X.......",
"X....XX.",
"X..XX...",
"XXX.....",
"X..XX...",
"X....XX.",
"........"},
{"..XX....",
"...X....",
"...X....",
"...X....",
"...X....",
"...X....",
"..XXX...",
"........"},
{"........",
"........",
".XX.XX..",
"X..X..X.",
"X..X..X.",
"X..X..X.",
"X..X..X.",
"........"},
{"........",
"........",
".XXXXX..",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
"........"},
{"........",
"........",
".XXXXX..",
"X.....X.",
"X.....X.",
"X.....X.",
".XXXXX..",
"........"},
{"........",
"........",
"XXXXXX..",
"X.....X.",
"X.....X.",
"XXXXXX..",
"X.......",
"X......."},
{"........",
"........",
".XXXXXX.",
"X.....X.",
"X.....X.",
".XXXXXX.",
"......X.",
"......X."},
{"........",
"........",
"X.XXXX..",
"XX....X.",
"X.......",
"X.......",
"X.......",
"........"},
{"........",
"........",
".XXXXX..",
"X.......",
".XXXXX..",
"......X.",
".XXXXX..",
"........"},
{"...X....",
"...X....",
"...X....",
".XXXXX..",
"...X....",
"...X....",
"....XX..",
"........"},
{"........",
"........",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
".XXXXX..",
"........"},
{"........",
"........",
"X.....X.",
"X.....X.",
".X...X..",
"..X.X...",
"...X....",
"........"},
{"........",
"........",
"X.....X.",
"X.....X.",
"X..X..X.",
"X.X.X.X.",
".X...X..",
"........"},
{"........",
"........",
"X.....X.",
".XX.XX..",
"...X....",
".XX.XX..",
"X.....X.",
"........"},
{"........",
"........",
"X.....X.",
"X.....X.",
"X.....X.",
".XXXXXX.",
"......X.",
".XXXXX.."},
{"........",
"........",
"XXXXXXX.",
"....XX..",
"...X....",
".XX.....",
"XXXXXXX.",
"........"},
{"...XX...",
"..X.....",
"..X.....",
"XX......",
"..X.....",
"..X.....",
"...XX...",
"........"},
{"...X....",
"...X....",
"...X....",
"...X....",
"...X....",
"...X....",
"...X....",
"...X...."},
{"...XX...",
".....X..",
".....X..",
"......XX",
".....X..",
".....X..",
"...XX...",
"........"},
{"........",
"........",
"..X.....",
".X.XX.X.",
".....X..",
"........",
"........",
"........"},
{"XXXXXXX.",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
"X.....X.",
"XXXXXXX.",
"........"},
};
/**
 * Validates the {@link #font} table and prints each glyph as a hexadecimal bitmap
 * constant ({@code 0x…,}), eight glyphs per output line.
 * <p>
 * Each glyph is encoded line by line, most significant bits first: within a line the
 * leftmost pixel maps to the highest bit of that line's group. With 8 lines of 8 pixels
 * a glyph occupies 64 bits, so the accumulator must be a {@code long} — the previous
 * {@code int} accumulator overflowed, the column value started at 16 instead of
 * {@code 1 << (width - 1)} (dropping the right three columns of every line, a leftover
 * from the 5-pixel generator), and the zero-padding used bits/8 instead of bits/4
 * hex digits.
 */
public static void main(final String[] args) {
    String[] letter;
    long bits;        // accumulated bitmap of the current glyph (up to 64 bits)
    long columnBit;   // bit value of the current column within the current line
    int printed = 0;  // glyphs printed on the current output line
    int lines = -1;   // expected lines per glyph, taken from the first glyph
    int width = -1;   // expected pixels per line, taken from the first line
    String s;
    //check if font has correct number of letters
    if (font.length != 96) {
        System.out.println("\nWARNING: Font has " + font.length + " letters, should have 96 letters.");
        System.out.println(" Font will be invalid!\n");
    }
    for (int i = 0; i < font.length; i++) {
        letter = font[i];
        bits = 0L;
        //check if all letters have same numbers of lines
        if ((letter.length != lines) && (lines == -1)) {
            lines = letter.length;
        } else if (letter.length != lines) {
            System.out.println("\n\nWARNING: Letter number " + i + " has different number of lines (" + letter.length + ") than predecessors (" + lines + ").");
            System.out.println(" Font will be invalid!\n");
        }
        for (int j = 0; j < letter.length; j++) {
            // make room for this line's bits
            bits = bits << letter[j].length();
            //check if all lines have the same number of bits
            if ((letter[j].length() != width) && (width == -1)) {
                width = letter[j].length();
            } else if (letter[j].length() != width) {
                System.out.println("\n\nWARNING: Letter number " + i + " has a different number of bits (" + letter[j].length() + ") than predecessors (" + width + ") in line " + j + ".");
                System.out.println(" Font will be invalid!\n");
            }
            // leftmost pixel is the most significant bit of this line
            columnBit = 1L << (letter[j].length() - 1);
            for (int col = 0; col < letter[j].length(); col++) {
                if (letter[j].charAt(col) == 'X') bits |= columnBit;
                columnBit = columnBit >> 1;
            }
        }
        // four bits per hex digit, so a glyph needs (lines * width / 4) digits
        s = Long.toHexString(bits).toUpperCase(java.util.Locale.ROOT);
        while (s.length() < (lines * width / 4)) s = "0" + s;
        System.out.print("0x" + s + ",");
        printed++;
        if (printed >= 8) {
            System.out.println();
            printed = 0;
        }
    }
}
}
| yacy/yacy_search_server | source/net/yacy/visualization/FontGenerator8Pixel.java |
1,470 | /*
* Copyright 2015 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright 2015 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.netty.handler.codec.http2;
import io.netty.util.AsciiString;
import io.netty.util.internal.UnstableApi;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
/**
 * Helper class representing a single header entry. Used by the benchmarks.
 */
@UnstableApi
public final class HpackHeader {
    private static final String ALPHABET =
            "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_";

    // Shared RNG instead of one allocation per randomBytes() call; java.util.Random is
    // thread-safe, and benchmark data does not need cryptographic-quality randomness.
    private static final Random RANDOM = new Random();

    final CharSequence name;
    final CharSequence value;

    private HpackHeader(byte[] name, byte[] value) {
        // 'false' avoids copying: the freshly generated arrays are never mutated afterwards.
        this.name = new AsciiString(name, false);
        this.value = new AsciiString(value, false);
    }

    /**
     * Creates a number of random headers with the given name/value lengths.
     *
     * @param numHeaders   number of headers to create
     * @param nameLength   length in bytes of each header name
     * @param valueLength  length in bytes of each header value
     * @param limitToAscii if {@code true}, values only contain characters from {@link #ALPHABET};
     *                     names are always limited to that alphabet
     */
    static List<HpackHeader> createHeaders(int numHeaders, int nameLength, int valueLength,
                                           boolean limitToAscii) {
        List<HpackHeader> hpackHeaders = new ArrayList<>(numHeaders);
        for (int i = 0; i < numHeaders; ++i) {
            // Force always ascii for header names
            byte[] name = randomBytes(new byte[nameLength], true);
            byte[] value = randomBytes(new byte[valueLength], limitToAscii);
            hpackHeaders.add(new HpackHeader(name, value));
        }
        return hpackHeaders;
    }

    /** Fills {@code bytes} with random content and returns the same array. */
    private static byte[] randomBytes(byte[] bytes, boolean limitToAscii) {
        if (limitToAscii) {
            for (int index = 0; index < bytes.length; ++index) {
                bytes[index] = (byte) ALPHABET.charAt(RANDOM.nextInt(ALPHABET.length()));
            }
        } else {
            RANDOM.nextBytes(bytes);
        }
        return bytes;
    }
}
| netty/netty | microbench/src/main/java/io/netty/handler/codec/http2/HpackHeader.java |
1,471 | /**
* Copyright (C) the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ninja.servlet;
import com.google.inject.servlet.GuiceFilter;
import java.io.IOException;
import java.security.Principal;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import javax.websocket.Endpoint;
import ninja.Result;
import ninja.Route;
import ninja.utils.HttpHeaderConstants;
import ninja.websockets.jsr356.Jsr356Handshake;
import ninja.websockets.jsr356.Jsr356HandshakePrincipal;
import ninja.websockets.jsr356.Jsr356HandshakeThreadLocal;
import ninja.websockets.WebSocketUtils;
/**
* Servlet filter that calls into a wrapped Guice filter which in turn will
* call into Ninja to handle requests. If a websocket handshake is detected
* there is some logic to first delegate it to Ninja then hand if off to the
* container for further processing.
*
* @author jjlauer
*/
public class NinjaServletFilter implements Filter {
private final GuiceFilter wrapped;
public NinjaServletFilter() {
this.wrapped = new GuiceFilter();
}
@Override
public void init(FilterConfig filterConfig) throws ServletException {
this.wrapped.init(filterConfig);
}
@Override
public void destroy() {
this.wrapped.destroy();
}
private boolean isWebSocketHandshake(HttpServletRequest httpRequest) {
String upgradeHeader = httpRequest.getHeader("Upgrade");
return upgradeHeader != null && "websocket".equalsIgnoreCase(upgradeHeader);
}
public HttpServletRequest buildWebSocketHandshakeHttpRequest(HttpServletRequest httpRequest, Jsr356Handshake handshake) {
// save handshake as attribute so it can be used in ninja
httpRequest.setAttribute(WebSocketUtils.ATTRIBUTE_HANDSHAKE, handshake);
// modify http method to be WS so ninja can do a route lookup
return new HttpServletRequestWrapper(httpRequest) {
@Override
public String getMethod() {
return Route.HTTP_METHOD_WEBSOCKET;
}
};
}
public HttpServletRequest buildWebSocketUpgradeHttpRequest(HttpServletRequest httpRequest, Jsr356Handshake handshake) {
// create a "UserPrincipal" that is actually storage of the handshake
final Jsr356HandshakePrincipal principal
= new Jsr356HandshakePrincipal(httpRequest.getUserPrincipal(), handshake);
// create a new wrapped servlet request with this new principal
return new HttpServletRequestWrapper(httpRequest) {
@Override
public Principal getUserPrincipal() {
return principal;
}
};
}
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
HttpServletRequest httpRequest = (HttpServletRequest)request;
HttpServletResponse httpResponse = (HttpServletResponse)response;
if (!isWebSocketHandshake(httpRequest)) {
// handle normally
wrapped.doFilter(request, response, chain);
} else {
// create handshake we'll use to help process along the way
final Jsr356Handshake handshake = new Jsr356Handshake();
// parse requested protocols via header
handshake.setRequestedProtocols(
WebSocketUtils.parseProtocolRequestHeader(
httpRequest.getHeader(HttpHeaderConstants.SEC_WEBSOCKET_PROTOCOL)));
// wrap request to mark it as a websocket and handoff to ninja
wrapped.doFilter(buildWebSocketHandshakeHttpRequest(httpRequest, handshake), response, chain);
// continue with handshake by moving onto next filter in container?
if (httpResponse.getStatus() == Result.SC_101_SWITCHING_PROTOCOLS) {
// Due to design flaw with JSR-356 - we will passthru some objects
// to the eventual endpoint by using both a thread local and then
// also by hijacking the "UserPrincipal" object on the request.
// Various servlet containers may handoff the "onOpen" of a websocket
// to a new thread pool that wouldn't have the thread local context
// get endpoint from current request and save in handshake
handshake.setEndpoint((Endpoint)httpRequest
.getAttribute(WebSocketUtils.ATTRIBUTE_ENDPOINT));
// assign to current thread local
Jsr356HandshakeThreadLocal.set(handshake);
try {
// stuff handshake as "UserPrincipal" so it can be used by endpoint
httpRequest = this.buildWebSocketUpgradeHttpRequest(httpRequest, handshake);
chain.doFilter(httpRequest, response);
} finally {
Jsr356HandshakeThreadLocal.remove();
}
}
}
}
} | ninjaframework/ninja | ninja-servlet/src/main/java/ninja/servlet/NinjaServletFilter.java |
1,472 | package nginx.clojure;
import java.util.AbstractMap.SimpleEntry;
import java.util.List;
/**
 * A single nginx HTTP request as seen from the Java side of nginx-clojure.
 * Implementations wrap a native nginx request structure and expose its
 * lifecycle, headers/variables and hijacking facilities.
 */
public interface NginxRequest {

    /** Returns the native pointer of the underlying nginx request structure. */
    public long nativeRequest();

    /** Returns true once this request has been marked as released. */
    public boolean isReleased();

    /** Marks this request as released. */
    public void tagReleased();

    // NOTE(review): method name contains a typo ("Reqeased", presumably
    // "Released") but is kept as-is — renaming a public interface method
    // would break every implementor and caller. Relation to tagReleased()
    // is not evident from this file; confirm against implementations.
    public void markReqeased();

    //for safe access with another thread
    public void prefetchAll();

    /**
     * Prefetches only the given request headers, nginx variables and
     * response headers for safe access from another thread.
     */
    public void prefetchAll(String[] headers, String[] variables, String[] outHeaders);

    /** Applies operations that were deferred/delayed on this request. */
    public void applyDelayed();

    /** Returns the handler responsible for this request. */
    public NginxHandler handler();

    /** Returns the server channel associated with this request. */
    public NginxHttpServerChannel channel();

    /** Returns true if this request has been hijacked via {@link #hijack(boolean)}. */
    public boolean isHijacked();

    /** Returns the current nginx processing phase of this request. */
    public int phase();

    /** Returns the listeners registered on this request with their attached data. */
    public List<SimpleEntry<Object, ChannelListener<Object>>> listeners();

    /** Returns the request URI. */
    public String uri();

    /**
     * Registers a channel listener together with an arbitrary data object
     * that will be passed back to the listener on events.
     */
    public <T> void addListener(T data, ChannelListener<T> listener);

    /** Returns true if this request is a websocket request. */
    public boolean isWebSocket();

    /** Returns the native reference count of this request. */
    public long nativeCount();

    // assumes this sets the native reference count to c — TODO confirm
    // the meaning of the returned value (previous count?) against callers
    public long nativeCount(long c);

    /** Returns the current eval count, then increments it. */
    public int getAndIncEvalCount();

    /**
     * Hijacks this request so the application takes over response writing.
     *
     * @param ignoreFilter whether nginx output filters should be bypassed
     */
    public NginxHttpServerChannel hijack(boolean ignoreFilter);

    /** Discards the request body, returning a native status code. */
    public long discardRequestBody();

    /** Returns the value of the named nginx variable, or null-equivalent if unset. */
    public String getVariable(String name);

    /** Returns the value of the named nginx variable, or {@code defaultVal} if unset. */
    public String getVariable(String name, String defaultVal);

    /** Sets the named nginx variable, returning a native status code. */
    public int setVariable(String name, String value);
}
| nginx-clojure/nginx-clojure | src/java/nginx/clojure/NginxRequest.java |
1,473 | /*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* Copyright (c) 2011-2012, Stephen Colebourne & Michael Nascimento Santos
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of JSR-310 nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package java.time.chrono;
import java.io.Externalizable;
import java.io.IOException;
import java.io.InvalidClassException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
import java.io.StreamCorruptedException;
import java.time.LocalDate;
import java.time.LocalDateTime;
/**
* The shared serialization delegate for this package.
*
* @implNote
* This class wraps the object being serialized, and takes a byte representing the type of the class to
* be serialized. This byte can also be used for versioning the serialization format. In this case another
* byte flag would be used in order to specify an alternative version of the type format.
* For example {@code CHRONO_TYPE_VERSION_2 = 21}
* <p>
* In order to serialize the object it writes its byte and then calls back to the appropriate class where
* the serialization is performed. In order to deserialize the object it read in the type byte, switching
* in order to select which class to call back into.
* <p>
* The serialization format is determined on a per class basis. In the case of field based classes each
* of the fields is written out with an appropriate size format in descending order of the field's size. For
* example in the case of {@link LocalDate} year is written before month. Composite classes, such as
* {@link LocalDateTime} are serialized as one object. Enum classes are serialized using the index of their
* element.
* <p>
* This class is mutable and should be created once per serialization.
*
* @serial include
* @since 1.8
*/
final class Ser implements Externalizable {

    /**
     * Serialization version.
     */
    @java.io.Serial
    private static final long serialVersionUID = -6103370247208168577L;

    // Type bytes identifying which class's serialized form follows in the stream.
    static final byte CHRONO_TYPE = 1;
    static final byte CHRONO_LOCAL_DATE_TIME_TYPE = 2;
    static final byte CHRONO_ZONE_DATE_TIME_TYPE = 3;
    static final byte JAPANESE_DATE_TYPE = 4;
    static final byte JAPANESE_ERA_TYPE = 5;
    static final byte HIJRAH_DATE_TYPE = 6;
    static final byte MINGUO_DATE_TYPE = 7;
    static final byte THAIBUDDHIST_DATE_TYPE = 8;
    static final byte CHRONO_PERIOD_TYPE = 9;

    /** The type being serialized. */
    private byte type;
    /** The object being serialized. */
    private Serializable object;

    /**
     * Constructor for deserialization.
     */
    public Ser() {
    }

    /**
     * Creates an instance for serialization.
     *
     * @param type the type
     * @param object the object
     */
    Ser(byte type, Serializable object) {
        this.type = type;
        this.object = object;
    }

    //-----------------------------------------------------------------------
    /**
     * Implements the {@code Externalizable} interface to write the object.
     * @serialData
     * A single type byte is written first, identifying the wrapped class,
     * followed by the class-specific data produced by that class's
     * {@code writeExternal} method. Refer to each class's {@code writeReplace}
     * serialized form (AbstractChronology, ChronoLocalDateTimeImpl,
     * ChronoZonedDateTimeImpl, JapaneseDate, JapaneseEra, HijrahDate,
     * MinguoDate, ThaiBuddhistDate, ChronoPeriodImpl) for the value of the
     * type byte and the sequence of values for the type.
     *
     * @param out the data stream to write to, not null
     */
    @Override
    public void writeExternal(ObjectOutput out) throws IOException {
        writeInternal(type, object, out);
    }

    private static void writeInternal(byte type, Object object, ObjectOutput out) throws IOException {
        out.writeByte(type);
        // Delegate to the wrapped class; each branch writes that class's
        // package-private external form after the type byte.
        switch (type) {
            case CHRONO_TYPE -> ((AbstractChronology) object).writeExternal(out);
            case CHRONO_LOCAL_DATE_TIME_TYPE -> ((ChronoLocalDateTimeImpl<?>) object).writeExternal(out);
            case CHRONO_ZONE_DATE_TIME_TYPE -> ((ChronoZonedDateTimeImpl<?>) object).writeExternal(out);
            case JAPANESE_DATE_TYPE -> ((JapaneseDate) object).writeExternal(out);
            case JAPANESE_ERA_TYPE -> ((JapaneseEra) object).writeExternal(out);
            case HIJRAH_DATE_TYPE -> ((HijrahDate) object).writeExternal(out);
            case MINGUO_DATE_TYPE -> ((MinguoDate) object).writeExternal(out);
            case THAIBUDDHIST_DATE_TYPE -> ((ThaiBuddhistDate) object).writeExternal(out);
            case CHRONO_PERIOD_TYPE -> ((ChronoPeriodImpl) object).writeExternal(out);
            default -> throw new InvalidClassException("Unknown serialized type");
        }
    }

    //-----------------------------------------------------------------------
    /**
     * Implements the {@code Externalizable} interface to read the object.
     * @serialData
     * The type byte is read first and determines which class's static
     * factory is invoked to reconstruct the instance from the stream
     * (Chronology.of(id) for chronologies; date.atTime(time) for
     * ChronoLocalDateTime; dateTime.atZone(offset).withZoneSameLocal(zone)
     * for ChronoZonedDateTime; chronology date factories for JapaneseDate,
     * HijrahDate, MinguoDate and ThaiBuddhistDate; JapaneseEra.of(eraValue)
     * for eras). That instance is returned as the de-serialized {@code Ser}
     * object.
     *
     * @param in the data stream to read from, not null
     */
    @Override
    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        type = in.readByte();
        object = readInternal(type, in);
    }

    static Serializable read(ObjectInput in) throws IOException, ClassNotFoundException {
        return readInternal(in.readByte(), in);
    }

    private static Serializable readInternal(byte type, ObjectInput in)
            throws IOException, ClassNotFoundException {
        // Dispatch on the type byte to the matching class's readExternal factory.
        return switch (type) {
            case CHRONO_TYPE -> (Serializable) AbstractChronology.readExternal(in);
            case CHRONO_LOCAL_DATE_TIME_TYPE -> (Serializable) ChronoLocalDateTimeImpl.readExternal(in);
            case CHRONO_ZONE_DATE_TIME_TYPE -> (Serializable) ChronoZonedDateTimeImpl.readExternal(in);
            case JAPANESE_DATE_TYPE -> JapaneseDate.readExternal(in);
            case JAPANESE_ERA_TYPE -> JapaneseEra.readExternal(in);
            case HIJRAH_DATE_TYPE -> HijrahDate.readExternal(in);
            case MINGUO_DATE_TYPE -> MinguoDate.readExternal(in);
            case THAIBUDDHIST_DATE_TYPE -> ThaiBuddhistDate.readExternal(in);
            case CHRONO_PERIOD_TYPE -> ChronoPeriodImpl.readExternal(in);
            default -> throw new StreamCorruptedException("Unknown serialized type");
        };
    }

    /**
     * Returns the object that will replace this one.
     *
     * @return the read object, should never be null
     */
    @java.io.Serial
    private Object readResolve() {
        return object;
    }
}
| openjdk/jdk | src/java.base/share/classes/java/time/chrono/Ser.java |
1,474 | /*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Modifications Copyright OpenSearch Contributors. See
* GitHub history for details.
*/
package org.opensearch.common.util.concurrent;
import org.apache.logging.log4j.Logger;
import org.opensearch.common.annotation.PublicApi;
import org.opensearch.common.collect.Tuple;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
 * This async IO processor allows to batch IO operations and have a single writer processing the write operations.
 * This can be used to ensure that threads can continue with other work while the actual IO operation is still processed
 * by a single worker. A worker in this context can be any caller of the {@link #put(Object, Consumer)} method since it will
 * hijack a worker if nobody else is currently processing queued items. If the internal queue has reached its capacity incoming threads
 * might be blocked until other items are processed
 *
 * @opensearch.api
 */
@PublicApi(since = "1.0.0")
public abstract class AsyncIOProcessor<Item> {
    private final Logger logger;
    // Bounded queue of pending items; producers block when it is full.
    private final ArrayBlockingQueue<Tuple<Item, Consumer<Exception>>> queue;
    private final ThreadContext threadContext;
    // Single permit acting as the "promise": whichever thread holds it is the
    // one (and only) thread currently draining and processing the queue.
    private final Semaphore promiseSemaphore = new Semaphore(1);
    // Start time (nanos) of the most recent processing run; exposed via
    // getLastRunStartTimeInNs() for subclasses/monitoring.
    private long lastRunStartTimeInNs;

    /**
     * @param logger logger used for failures while writing or notifying
     * @param queueSize maximum number of pending items before callers block
     * @param threadContext context used to preserve the caller's thread
     *        context when listeners are notified from a different thread
     */
    protected AsyncIOProcessor(Logger logger, int queueSize, ThreadContext threadContext) {
        this.logger = logger;
        this.queue = new ArrayBlockingQueue<>(queueSize);
        this.threadContext = threadContext;
    }

    /**
     * Adds the given item to the queue. The listener is notified once the item is processed
     */
    public void put(Item item, Consumer<Exception> listener) {
        Objects.requireNonNull(item, "item must not be null");
        Objects.requireNonNull(listener, "listener must not be null");
        // the algorithm here tries to reduce the load on each individual caller.
        // we try to have only one caller that processes pending items to disc while others just add to the queue but
        // at the same time never overload the node by pushing too many items into the queue.

        // we first try make a promise that we are responsible for the processing
        final boolean promised = promiseSemaphore.tryAcquire();
        if (promised == false) {
            // in this case we are not responsible and can just block until there is space
            addToQueue(item, listener);
        }

        // here we have to try to make the promise again otherwise there is a race when a thread puts an entry without making the promise
        // while we are draining that mean we might exit below too early in the while loop if the drainAndSync call is fast.
        if (promised || promiseSemaphore.tryAcquire()) {
            final List<Tuple<Item, Consumer<Exception>>> candidates = new ArrayList<>();
            if (promised) {
                // we are responsible for processing we don't need to add the tuple to the queue we can just add it to the candidates
                // no need to preserve context for listener since it runs in current thread.
                candidates.add(new Tuple<>(item, listener));
            }
            // since we made the promise to process we gotta do it here at least once
            drainAndProcessAndRelease(candidates);
            while (queue.isEmpty() == false && promiseSemaphore.tryAcquire()) {
                // yet if the queue is not empty AND nobody else has yet made the promise to take over we continue processing
                drainAndProcessAndRelease(candidates);
            }
        }
    }

    // Blocks until there is space in the bounded queue; if interrupted while
    // waiting, restores the interrupt flag and fails the listener with the
    // InterruptedException instead of silently dropping the item.
    void addToQueue(Item item, Consumer<Exception> listener) {
        try {
            queue.put(new Tuple<>(item, preserveContext(listener)));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            listener.accept(e);
        }
    }

    // Drains all queued items into candidates, writes them, releases the
    // promise, then notifies every listener with the (possibly null) failure.
    // The semaphore is released in a finally block so a failing write can
    // never dead-lock subsequent callers.
    void drainAndProcessAndRelease(List<Tuple<Item, Consumer<Exception>>> candidates) {
        lastRunStartTimeInNs = System.nanoTime();
        Exception exception;
        try {
            queue.drainTo(candidates);
            exception = processList(candidates);
        } finally {
            promiseSemaphore.release();
        }
        // notify AFTER releasing the promise so listener callbacks cannot
        // block other threads from taking over processing
        notifyList(candidates, exception);
        candidates.clear();
    }

    // Writes the batch; returns the failure (if any) rather than throwing so
    // that it can be delivered to every listener in the batch.
    private Exception processList(List<Tuple<Item, Consumer<Exception>>> candidates) {
        Exception exception = null;
        if (candidates.isEmpty() == false) {
            try {
                write(candidates);
            } catch (Exception ex) { // if this fails we are in deep shit - fail the request
                logger.debug("failed to write candidates", ex);
                // this exception is passed to all listeners - we don't retry. if this doesn't work we are in deep shit
                exception = ex;
            }
        }
        return exception;
    }

    // Invokes each listener with the shared outcome; a throwing listener is
    // logged and must not prevent the remaining listeners from being notified.
    void notifyList(List<Tuple<Item, Consumer<Exception>>> candidates, Exception exception) {
        for (Tuple<Item, Consumer<Exception>> tuple : candidates) {
            Consumer<Exception> consumer = tuple.v2();
            try {
                consumer.accept(exception);
            } catch (Exception ex) {
                logger.warn("failed to notify callback", ex);
            }
        }
    }

    // Wraps the listener so it runs with the thread context captured at
    // enqueue time, since it may be invoked later from a different thread.
    Consumer<Exception> preserveContext(Consumer<Exception> consumer) {
        Supplier<ThreadContext.StoredContext> restorableContext = threadContext.newRestorableContext(false);
        return e -> {
            try (ThreadContext.StoredContext ignore = restorableContext.get()) {
                consumer.accept(e);
            }
        };
    }

    /**
     * Writes or processes the items out or to disk.
     */
    protected abstract void write(List<Tuple<Item, Consumer<Exception>>> candidates) throws IOException;

    Logger getLogger() {
        return logger;
    }

    Semaphore getPromiseSemaphore() {
        return promiseSemaphore;
    }

    long getLastRunStartTimeInNs() {
        return lastRunStartTimeInNs;
    }

    ArrayBlockingQueue<Tuple<Item, Consumer<Exception>>> getQueue() {
        return queue;
    }
}
| opensearch-project/OpenSearch | server/src/main/java/org/opensearch/common/util/concurrent/AsyncIOProcessor.java |
1,475 | /*
* Copyright 2000-2022 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.server;
import java.io.IOException;
import java.util.regex.Pattern;

import com.vaadin.shared.extension.filedownloader.FileDownloaderState;
import com.vaadin.ui.AbstractComponent;
/**
* Extension that starts a download when the extended component is clicked. This
* is used to overcome two challenges:
* <ul>
* <li>Resource should be bound to a component to allow it to be garbage
* collected when there are no longer any ways of reaching the resource.</li>
* <li>Download should be started directly when the user clicks e.g. a Button
* without going through a server-side click listener to avoid triggering
* security warnings in some browsers.</li>
* </ul>
* <p>
* Please note that the download will be started in an iframe, which means that
* care should be taken to avoid serving content types that might make the
* browser attempt to show the content using a plugin instead of downloading it.
* Connector resources (e.g. {@link FileResource} and {@link ClassResource})
* will automatically be served using a
* <code>Content-Type: application/octet-stream</code> header unless
* {@link #setOverrideContentType(boolean)} has been set to <code>false</code>
* while files served in other ways, (e.g. {@link ExternalResource} or
* {@link ThemeResource}) will not automatically get this treatment.
* </p>
*
* @author Vaadin Ltd
* @since 7.0.0
*/
public class FileDownloader extends AbstractExtension {

    /**
     * Matches connector request paths served by this extension: {@code "dl"}
     * optionally followed by a sub-path. Compiled once because
     * {@link #handleConnectorRequest(VaadinRequest, VaadinResponse, String)}
     * runs for every download request and {@code String.matches} would
     * recompile the pattern on each call.
     */
    private static final Pattern DOWNLOAD_PATH_PATTERN = Pattern
            .compile("dl(/.*)?");

    private boolean overrideContentType = true;

    /**
     * Creates a new file downloader for the given resource. To use the
     * downloader, you should also {@link #extend(AbstractClientConnector)} the
     * component.
     *
     * @param resource
     *            the resource to download when the user clicks the extended
     *            component.
     */
    public FileDownloader(Resource resource) {
        if (resource == null) {
            throw new IllegalArgumentException("resource may not be null");
        }
        setResource("dl", resource);
    }

    /**
     * Add this extension to the target component.
     *
     * @param target
     *            the component to attach this extension to
     */
    public void extend(AbstractComponent target) {
        super.extend(target);
    }

    /**
     * Add this extension to the {@code EventTrigger}.
     *
     * @param eventTrigger
     *            the trigger to attach this extension to
     * @since 8.4
     */
    public void extend(EventTrigger eventTrigger) {
        super.extend(eventTrigger.getConnector());
        getState().partInformation = eventTrigger.getPartInformation();
    }

    /**
     * Gets the resource set for download.
     *
     * @return the resource that will be downloaded if clicking the extended
     *         component
     */
    public Resource getFileDownloadResource() {
        return getResource("dl");
    }

    /**
     * Sets the resource that is downloaded when the extended component is
     * clicked.
     *
     * @param resource
     *            the resource to download
     */
    public void setFileDownloadResource(Resource resource) {
        setResource("dl", resource);
    }

    /**
     * Sets whether the content type of served resources should be overridden to
     * <code>application/octet-stream</code> to reduce the risk of a browser
     * plugin choosing to display the resource instead of downloading it. This
     * is by default set to <code>true</code>.
     * <p>
     * Please note that this only affects Connector resources (e.g.
     * {@link FileResource} and {@link ClassResource}) but not other resource
     * types (e.g. {@link ExternalResource} or {@link ThemeResource}).
     * </p>
     *
     * @param overrideContentType
     *            <code>true</code> to override the content type if possible;
     *            <code>false</code> to use the original content type.
     */
    public void setOverrideContentType(boolean overrideContentType) {
        this.overrideContentType = overrideContentType;
    }

    /**
     * Checks whether the content type should be overridden.
     *
     * @return <code>true</code> if the content type will be overridden when
     *         possible; <code>false</code> if the original content type will be
     *         used.
     * @see #setOverrideContentType(boolean)
     */
    public boolean isOverrideContentType() {
        return overrideContentType;
    }

    /**
     * {@inheritDoc}
     *
     * @throws IOException
     *             if something goes wrong with the download or the user
     *             cancelled the file download process.
     */
    @Override
    public boolean handleConnectorRequest(VaadinRequest request,
            VaadinResponse response, String path) throws IOException {
        if (!DOWNLOAD_PATH_PATTERN.matcher(path).matches()) {
            // Ignore if it isn't for us
            return false;
        }
        VaadinSession session = getSession();

        // Resolve the stream while holding the session lock, but write the
        // response outside the lock so a slow download cannot block the UI
        session.lock();
        DownloadStream stream;

        try {
            Resource resource = getFileDownloadResource();
            if (!(resource instanceof ConnectorResource)) {
                return false;
            }
            stream = ((ConnectorResource) resource).getStream();

            String contentDisposition = stream
                    .getParameter(DownloadStream.CONTENT_DISPOSITION);
            if (contentDisposition == null) {
                contentDisposition = "attachment; " + DownloadStream
                        .getContentDispositionFilename(stream.getFileName());
            }

            stream.setParameter(DownloadStream.CONTENT_DISPOSITION,
                    contentDisposition);

            // Content-Type to block eager browser plug-ins from hijacking
            // the file
            if (isOverrideContentType()) {
                stream.setContentType("application/octet-stream;charset=UTF-8");
            }
        } finally {
            session.unlock();
        }
        stream.writeResponse(request, response);
        return true;
    }

    @Override
    protected FileDownloaderState getState() {
        return (FileDownloaderState) super.getState();
    }

    @Override
    protected FileDownloaderState getState(boolean markAsDirty) {
        return (FileDownloaderState) super.getState(markAsDirty);
    }
}
| vaadin/framework | server/src/main/java/com/vaadin/server/FileDownloader.java |
1,476 | /*
* The MIT License
*
* Copyright (c) 2015-2020 The Broad Institute
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package picard.sam;
import htsjdk.samtools.DownsamplingIterator;
import htsjdk.samtools.DownsamplingIteratorFactory;
import htsjdk.samtools.DownsamplingIteratorFactory.Strategy;
import htsjdk.samtools.SAMFileHeader;
import htsjdk.samtools.SAMFileWriter;
import htsjdk.samtools.SAMFileWriterFactory;
import htsjdk.samtools.SAMProgramRecord;
import htsjdk.samtools.SAMRecord;
import htsjdk.samtools.SamInputResource;
import htsjdk.samtools.SamReader;
import htsjdk.samtools.SamReaderFactory;
import htsjdk.samtools.metrics.MetricsFile;
import htsjdk.samtools.util.CloserUtil;
import htsjdk.samtools.util.IOUtil;
import htsjdk.samtools.util.Log;
import htsjdk.samtools.util.ProgressLogger;
import org.broadinstitute.barclay.argparser.Argument;
import org.broadinstitute.barclay.argparser.CommandLineProgramProperties;
import org.broadinstitute.barclay.help.DocumentedFeature;
import picard.PicardException;
import picard.analysis.CollectQualityYieldMetrics.QualityYieldMetrics;
import picard.analysis.CollectQualityYieldMetrics.QualityYieldMetricsCollector;
import picard.cmdline.CommandLineProgram;
import picard.cmdline.StandardOptionDefinitions;
import picard.cmdline.argumentcollections.ReferenceArgumentCollection;
import picard.cmdline.programgroups.ReadDataManipulationProgramGroup;
import picard.nio.PicardBucketUtils;
import picard.nio.PicardHtsPath;
import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
/**
* <h3>Summary</h3>
* This tool applies a downsampling algorithm to a SAM or BAM file to retain only a (deterministically random) subset of
* the reads. Reads from the same template (e.g. read-pairs, secondary and supplementary reads) are all either kept or
* discarded as a unit, with the goal of retaining reads from <code>PROBABILITY * (input templates)</code>. The results
* will contain approximately <code>PROBABILITY * (input reads)</code>, however for very small
* probabilities this may not be the case.
*
* A number of different downsampling strategies are supported using the {@link #STRATEGY} option:
* <dl>
* <dt>ConstantMemory</dt>
* <dd>
* Downsamples a stream or file of SAMRecords using a hash-projection strategy such that it can run in constant memory.
* The downsampling is stochastic, and therefore the actual retained proportion will vary around the requested proportion. Due
* to working in fixed memory this strategy is good for large inputs, and due to the stochastic nature the accuracy of this strategy
* is highest with a high number of output records, and diminishes at low output volumes.
* </dd>
* <dt>HighAccuracy</dt>
* <dd>
* Attempts (but does not guarantee) to provide accuracy up to a specified limit. Accuracy is defined as emitting
* a proportion of reads as close to the requested proportion as possible. In order to do so this strategy requires
* memory that is proportional to the number of template names in the incoming stream of reads, and will thus require
* large amounts of memory when running on large input files.
* </dd>
* <dt>Chained</dt>
* <dd>
* Attempts to provide a compromise strategy that offers some of the advantages of both the ConstantMemory and HighAccuracy strategies.
* Uses a ConstantMemory strategy to downsample the incoming stream to approximately the desired proportion, and then a HighAccuracy
* strategy to finish. Works in a single pass, and will provide accuracy close to (but often not as good as) HighAccuracy while requiring
* memory proportional to the set of reads emitted from the ConstantMemory strategy to the HighAccuracy strategy. Works well when downsampling
* large inputs to small proportions (e.g. downsampling hundreds of millions of reads and retaining only 2%. Should be accurate 99.9% of the time
* when the input contains more than 50,000 templates (read names). For smaller inputs, HighAccuracy is recommended instead.
* </dd>
* </dl>
*
* The number of records written can be output to a {@link QualityYieldMetrics} metrics file via the {@link #METRICS_FILE}.
*
* <h3>Usage examples:</h3>
* <h4>Downsample file, keeping about 10% of the reads</h4>
* <pre>
* java -jar picard.jar DownsampleSam \
* I=input.bam \
* O=downsampled.bam \
* P=0.1
* </pre>
*
* <h4>Downsample file, keeping 2% of the reads </h4>
* <pre>
* java -jar picard.jar DownsampleSam \
* I=input.bam \
* O=downsampled.bam \
* STRATEGY=Chained \
* P=0.02 \
* ACCURACY=0.0001
* </pre>
*
* <h4>Downsample file, keeping 0.001% of the reads (may require more memory)</h4>
* <pre>
* java -jar picard.jar DownsampleSam \
* I=input.bam \
* O=downsampled.bam \
* STRATEGY=HighAccuracy \
* P=0.00001 \
* ACCURACY=0.0000001
* </pre>
*
* @author Tim Fennell
*/
@CommandLineProgramProperties(
summary = DownsampleSam.USAGE_SUMMARY + DownsampleSam.USAGE_DETAILS,
oneLineSummary = DownsampleSam.USAGE_SUMMARY,
programGroup = ReadDataManipulationProgramGroup.class)
@DocumentedFeature
public class DownsampleSam extends CommandLineProgram {

    /** Program name written to the @PG record; used to detect previous DownsampleSam runs in the header. */
    final String PG_PROGRAM_NAME = getClass().getSimpleName();

    static final String USAGE_SUMMARY = "Downsample a SAM or BAM file.";
    static final String USAGE_DETAILS = "This tool applies a downsampling algorithm to a SAM or BAM file to retain " +
            "only a (deterministically random) subset of the reads. Reads from the same template (e.g. read-pairs, secondary " +
            "and supplementary reads) are all either kept or discarded as a unit, with the goal of retaining reads " +
            "from PROBABILITY * input templates. The results will contain approximately " +
            "PROBABILITY * input reads, however for very small PROBABILITIES this may not " +
            "be the case.\n" +
            "A number of different downsampling strategies are supported using the STRATEGY option:\n\n" +
            "ConstantMemory:\n " + DownsamplingIteratorFactory.CONSTANT_MEMORY_DESCRPTION + "\n" +
            "HighAccuracy:\n " + DownsamplingIteratorFactory.HIGH_ACCURACY_DESCRIPTION + "\n" +
            "Chained:\n " + DownsamplingIteratorFactory.CHAINED_DESCRIPTION + "\n" +
            "<h3>Usage examples:</h3>\n" +
            "<h4>Downsample file, keeping about 10% of the reads</h4>\n"+
            "\n"+
            "java -jar picard.jar DownsampleSam \\\n" +
            " I=input.bam \\\n" +
            " O=downsampled.bam \\\n" +
            " P=0.1\n"+
            "\n" +
            "<h3>Downsample file, keeping about 2% of the reads </h3>\n"+
            "\n" +
            "java -jar picard.jar DownsampleSam \\\n" +
            " I=input.bam \\\n" +
            " O=downsampled.bam \\\n" +
            " STRATEGY=Chained \\\n" +
            " P=0.02 \\\n" +
            " ACCURACY=0.0001\n" +
            "\n" +
            "<h3>Downsample file, keeping about 0.001% of the reads (may require more memory)</h3>\n"+
            "\n" +
            "java -jar picard.jar DownsampleSam \\\n" +
            " I=input.bam \\\n" +
            " O=downsampled.bam \\\n" +
            " STRATEGY=HighAccuracy \\\n" +
            " P=0.00001 \\\n" +
            " ACCURACY=0.0000001\n";

    @Argument(shortName = StandardOptionDefinitions.INPUT_SHORT_NAME, doc = "The input SAM or BAM file to downsample.")
    public PicardHtsPath INPUT;

    @Argument(shortName = StandardOptionDefinitions.OUTPUT_SHORT_NAME, doc = "The output, downsampled, SAM, BAM or CRAM file to write.")
    public PicardHtsPath OUTPUT;

    @Argument(shortName="S", doc="The downsampling strategy to use. See usage for discussion.")
    public Strategy STRATEGY = Strategy.ConstantMemory;

    @Argument(shortName = "R", doc = "Random seed used for deterministic results. " +
            "Setting to null will cause multiple invocations to produce different results. The header of the file will be checked for any previous runs " +
            "of DownsampleSam. If DownsampleSam has been run before on this data with the same seed, the seed will be updated in a deterministic fashion " +
            "so the DownsampleSam will perform correctly, and still deterministically.")
    public Integer RANDOM_SEED = 1;

    @Argument(shortName = "P", doc = "The probability of keeping any individual read, between 0 and 1.")
    public double PROBABILITY = 1;

    @Argument(shortName = "A", doc = "The accuracy that the downsampler should try to achieve if the selected strategy supports it. " +
            "Note that accuracy is never guaranteed, but some strategies will attempt to provide accuracy within the requested bounds. " +
            "Higher accuracy will generally require more memory.")
    public double ACCURACY = 0.0001;

    @Argument(shortName = "M", doc = "The metrics file (of type QualityYieldMetrics) which will contain information about the downsampled file.", optional=true)
    public PicardHtsPath METRICS_FILE;

    private final Log log = Log.getInstance(DownsampleSam.class);

    /** @PG attribute tag under which the random seed of each DownsampleSam run is recorded. */
    public static final String RANDOM_SEED_TAG = "rs";

    @Override
    protected String[] customCommandLineValidation() {
        // 0 and 1 are legal (if odd) values; they only produce warnings in doWork().
        if (PROBABILITY < 0 || PROBABILITY > 1)
            return new String[]{"Downsampling requires 0<=PROBABILITY<=1. Found invalid value: " + PROBABILITY};
        return super.customCommandLineValidation();
    }

    /**
     * Reads INPUT, pushes every record through a {@link DownsamplingIterator} built for the chosen
     * STRATEGY/PROBABILITY/ACCURACY/RANDOM_SEED, and writes the survivors to OUTPUT. Optionally
     * collects QualityYieldMetrics into METRICS_FILE. The seed actually used is recorded in a new
     * @PG record so later runs can avoid re-using it.
     *
     * @return 0 on success (errors are thrown as exceptions)
     */
    @Override
    protected int doWork() {
        IOUtil.assertFileIsReadable(INPUT.toPath());
        // Writability can only be pre-checked for local files; cloud paths are validated on open.
        if (OUTPUT.getScheme().equals(PicardBucketUtils.FILE_SCHEME)){
            IOUtil.assertFileIsWritable(OUTPUT.toPath().toFile());
        }
        // Warn the user if they are running with P=1 or P=0 (which are legal, but odd)
        if (PROBABILITY == 1) {
            log.warn("Running DownsampleSam with PROBABILITY=1! This will likely just recreate the input file.");
        }
        if (PROBABILITY == 0) {
            log.warn("Running DownsampleSam with PROBABILITY=0! This will create an empty file.");
        }
        if (RANDOM_SEED == null) {
            RANDOM_SEED = new Random().nextInt();
            log.warn(String.format(
                    "Drawing a random seed because RANDOM_SEED was not set. Set RANDOM_SEED to %s to reproduce these results in the future.", RANDOM_SEED));
        }
        final SamReader in = SamReaderFactory.makeDefault().referenceSequence(referenceSequence.getReferencePath()).open(SamInputResource.of(INPUT.toPath()));
        final SAMFileHeader header = in.getFileHeader().clone();
        if (STRATEGY == Strategy.ConstantMemory || STRATEGY == Strategy.Chained) {
            //if running using ConstantMemory or Chained strategy, need to check if we have previously run with the same random seed
            //collect previously used seeds
            final Integer userSeed = RANDOM_SEED;
            final Set<Integer> previousSeeds = new HashSet<>();
            for (final SAMProgramRecord pg : header.getProgramRecords()) {
                if (pg.getProgramName() != null && pg.getProgramName().equals(PG_PROGRAM_NAME)) {
                    final String previousSeedString = pg.getAttribute(RANDOM_SEED_TAG);
                    if (previousSeedString == null) {
                        /* The previous seed was not recorded. In this case, the current seed may be the same as the previous seed,
                        so we will change it to a randomly selected seed, which is very likely to be unique
                         */
                        RANDOM_SEED = new Random(pg.hashCode()).nextInt();
                        log.warn("DownsampleSam has been run before on this data, but the previous seed was not recorded. The used seed will be changed to minimize the chance of using" +
                                " the same seed as in a previous run.");
                    } else {
                        previousSeeds.add(Integer.parseInt(previousSeedString));
                    }
                }
            }
            final Random rnd = new Random(RANDOM_SEED);
            while (previousSeeds.contains(RANDOM_SEED)) {
                // Capture the colliding seed before replacing it so the warning reports the seed
                // that was actually used previously (the old code logged the freshly drawn seed).
                final int collidingSeed = RANDOM_SEED;
                RANDOM_SEED = rnd.nextInt();
                log.warn("DownsampleSam has been run before on this data with the seed " + collidingSeed + ". The random seed will be changed to avoid using the " +
                        "same seed as previously.");
            }
            if (!userSeed.equals(RANDOM_SEED)) {
                log.warn("RANDOM_SEED has been changed to " + RANDOM_SEED + ".");
            }
        }
        // Record the seed actually used so future runs can detect and avoid it.
        final SAMProgramRecord pgRecord = getPGRecord(header);
        pgRecord.setAttribute(RANDOM_SEED_TAG, RANDOM_SEED.toString());
        header.addProgramRecord(pgRecord);
        final SAMFileWriter out = new SAMFileWriterFactory().makeWriter(header, true, OUTPUT.toPath(), referenceSequence.getReferencePath());
        final ProgressLogger progress = new ProgressLogger(log, (int) 1e7, "Wrote");
        final DownsamplingIterator iterator = DownsamplingIteratorFactory.make(in, STRATEGY, PROBABILITY, ACCURACY, RANDOM_SEED);
        final QualityYieldMetricsCollector metricsCollector = new QualityYieldMetricsCollector(true, false, false);
        while (iterator.hasNext()) {
            final SAMRecord rec = iterator.next();
            out.addAlignment(rec);
            if (METRICS_FILE != null) metricsCollector.acceptRecord(rec, null);
            progress.record(rec);
        }
        out.close();
        CloserUtil.close(in);
        final NumberFormat fmt = new DecimalFormat("0.00%");
        log.info("Finished downsampling.");
        log.info("Kept ", iterator.getAcceptedCount(), " out of ", iterator.getSeenCount(), " reads (", fmt.format(iterator.getAcceptedFraction()), ").");
        if (METRICS_FILE != null) {
            final MetricsFile<QualityYieldMetrics, Integer> metricsFile = getMetricsFile();
            metricsCollector.finish();
            metricsCollector.addMetricsToFile(metricsFile);
            try (final BufferedWriter writer = Files.newBufferedWriter(METRICS_FILE.toPath())){
                metricsFile.write(writer);
            } catch (IOException e) {
                throw new PicardException("Encountered an error while writing the metrics file: " + METRICS_FILE.getURIString(), e);
            }
        }
        return 0;
    }

    @Override
    protected ReferenceArgumentCollection makeReferenceArgumentCollection() {
        // Override to allow "R" to be hijacked for "RANDOM_SEED"
        return new ReferenceArgumentCollection() {
            @Argument(doc = "The reference sequence file.", optional=true)
            public PicardHtsPath REFERENCE_SEQUENCE;

            @Override
            public File getReferenceFile() {
                return ReferenceArgumentCollection.getFileSafe(REFERENCE_SEQUENCE, log);
            }

            @Override
            public PicardHtsPath getHtsPath() {
                return REFERENCE_SEQUENCE;
            }
        };
    }
}
| broadinstitute/picard | src/main/java/picard/sam/DownsampleSam.java |
1,477 | package cn.hutool.core.codec;
import cn.hutool.core.util.CharsetUtil;
import cn.hutool.core.util.StrUtil;
import java.nio.charset.Charset;
/**
* Base32 - encodes and decodes RFC4648 Base32 (see https://datatracker.ietf.org/doc/html/rfc4648#section-6 )<br>
* base32就是用32(2的5次方)个特定ASCII码来表示256个ASCII码。<br>
* 所以,5个ASCII字符经过base32编码后会变为8个字符(公约数为40),长度增加3/5.不足8n用“=”补足。<br>
* 根据RFC4648 Base32规范,支持两种模式:
* <ul>
* <li>Base 32 Alphabet (ABCDEFGHIJKLMNOPQRSTUVWXYZ234567)</li>
* <li>"Extended Hex" Base 32 Alphabet (0123456789ABCDEFGHIJKLMNOPQRSTUV)</li>
* </ul>
*
* @author Looly
*/
public class Base32 {

	// ----------------------------------------------------------------------------------------- encode

	/**
	 * Base32-encodes the given bytes using the standard RFC 4648 alphabet.
	 *
	 * @param bytes data to encode
	 * @return base32 string
	 */
	public static String encode(final byte[] bytes) {
		return Base32Codec.INSTANCE.encode(bytes);
	}

	/**
	 * Base32-encodes the given string using UTF-8 bytes.
	 *
	 * @param source string to encode
	 * @return base32 string
	 */
	public static String encode(final String source) {
		return encode(source, CharsetUtil.CHARSET_UTF_8);
	}

	/**
	 * Base32-encodes the given string using the specified charset.
	 *
	 * @param source  string to encode
	 * @param charset charset used to obtain the string's bytes
	 * @return base32 string
	 */
	public static String encode(final String source, final Charset charset) {
		return encode(StrUtil.bytes(source, charset));
	}

	/**
	 * Base32-encodes the given bytes using the "Extended Hex" alphabet (RFC 4648 section 7).
	 *
	 * @param bytes data to encode
	 * @return base32hex string
	 */
	public static String encodeHex(final byte[] bytes) {
		return Base32Codec.INSTANCE.encode(bytes, true);
	}

	/**
	 * Base32-encodes the given string using UTF-8 bytes and the "Extended Hex" alphabet.
	 *
	 * @param source string to encode
	 * @return base32hex string
	 */
	public static String encodeHex(final String source) {
		return encodeHex(source, CharsetUtil.CHARSET_UTF_8);
	}

	/**
	 * Base32-encodes the given string using the specified charset and the "Extended Hex" alphabet.
	 *
	 * @param source  string to encode
	 * @param charset charset used to obtain the string's bytes
	 * @return base32hex string
	 */
	public static String encodeHex(final String source, final Charset charset) {
		return encodeHex(StrUtil.bytes(source, charset));
	}

	// ----------------------------------------------------------------------------------------- decode

	/**
	 * Decodes a base32 string (standard alphabet) into its raw bytes.
	 *
	 * @param base32 base32-encoded string
	 * @return decoded bytes
	 */
	public static byte[] decode(final String base32) {
		return Base32Codec.INSTANCE.decode(base32);
	}

	/**
	 * Decodes a base32 string into a UTF-8 string.
	 *
	 * @param source base32-encoded string
	 * @return decoded string
	 */
	public static String decodeStr(final String source) {
		return decodeStr(source, CharsetUtil.CHARSET_UTF_8);
	}

	/**
	 * Decodes a base32 string into a string using the specified charset.
	 *
	 * @param source  base32-encoded string
	 * @param charset charset used to build the resulting string
	 * @return decoded string
	 */
	public static String decodeStr(final String source, final Charset charset) {
		return StrUtil.str(decode(source), charset);
	}

	/**
	 * Decodes a base32hex string ("Extended Hex" alphabet) into its raw bytes.
	 *
	 * @param base32 base32hex-encoded string
	 * @return decoded bytes
	 */
	public static byte[] decodeHex(final String base32) {
		return Base32Codec.INSTANCE.decode(base32, true);
	}

	/**
	 * Decodes a base32hex string into a UTF-8 string.
	 *
	 * @param source base32hex-encoded string
	 * @return decoded string
	 */
	public static String decodeStrHex(final String source) {
		return decodeStrHex(source, CharsetUtil.CHARSET_UTF_8);
	}

	/**
	 * Decodes a base32hex string into a string using the specified charset.
	 *
	 * @param source  base32hex-encoded string
	 * @param charset charset used to build the resulting string
	 * @return decoded string
	 */
	public static String decodeStrHex(final String source, final Charset charset) {
		return StrUtil.str(decodeHex(source), charset);
	}
}
| dromara/hutool | hutool-core/src/main/java/cn/hutool/core/codec/Base32.java |
1,478 | package org.jcp.jsr331.hakan;
/**
*
* Least Diff problem in JSR331.
*
* Minimize the difference ABCDE - FGHIJ
* where A..J is all different in the range 0..9.
*
* The solution is: 50123 - 49876 = 247
*
* Compare with the following models:
* - Choco: http://www.hakank.org/choco/LeastDiff2.java
* - Comet: http://www.hakank.org/comet/least_diff.co
* - ECLiPSE: http://www.hakank.org/eclipse/least_diff2.ecl
* - Gecode/R: http://www.hakank.org/gecode_r/least_diff.rb
* - Gecode: http://www.hakank.org/gecode/least_diff.cpp
* - Google CP Solver: http://www.hakank.org/google_or_tools/least_diff.py
* - JaCoP: http://www.hakank.org/JaCoP/LeastDiff.java
* - MiniZinc: http://www.hakank.org/minizinc/least_diff.mzn
* - SICStus: http://www.hakank.org/sicstus/least_diff.pl
* - Tailor/Essence': http://www.hakank.org/tailor/leastDiff.eprime
* - Zinc: http://www.hakank.org/minizinc/least_diff.zinc
*
* Model by Hakan Kjellerstrand ([email protected])
* Also see http://www.hakank.org/jsr_331/
*
*/
import javax.constraints.*;
public class LeastDiff2 {

    /** The ten digit variables A..J (in that order); shared between define() and solve(). */
    Var[] letters;

    Problem p = ProblemFactory.newProblem("LeastDiff");

    public static void main(String[] args) {
        LeastDiff2 leastDiff2 = new LeastDiff2();
        leastDiff2.define();
        leastDiff2.solve();
    }

    /**
     * Builds the model: ten all-different digits A..J, X = ABCDE, Y = FGHIJ,
     * and Diff = X - Y (Diff's domain 0..1000 also forces X > Y).
     */
    public void define() {
        // One variable per letter, each a digit 0..9, created in A..J order.
        final String names = "ABCDEFGHIJ";
        letters = new Var[names.length()];
        for (int i = 0; i < letters.length; i++) {
            letters[i] = p.variable(String.valueOf(names.charAt(i)), 0, 9);
        }

        Var Diff = p.variable("Diff", 0, 1000);
        Var X = p.variable("X", 0, 100000);
        Var[] XArray = {letters[0], letters[1], letters[2], letters[3], letters[4]};
        Var Y = p.variable("Y", 0, 100000);
        Var[] YArray = {letters[5], letters[6], letters[7], letters[8], letters[9]};

        // define and post constraints
        try {
            p.postAllDifferent(letters);
            // X and Y are the base-10 values of the two 5-digit numbers.
            p.post(new int[]{10000, 1000, 100, 10, 1}, XArray, "=", X);
            p.post(new int[]{10000, 1000, 100, 10, 1}, YArray, "=", Y);
            p.post(X.minus(Y), "=", Diff);
        } catch (Exception e) {
            p.log("Error posting constraints: " + e);
            System.exit(-1);
        }
    }

    /**
     * Minimizes Diff and prints the optimal assignment (expected: 50123 - 49876 = 247).
     */
    public void solve() {
        Solver solver = p.getSolver();
        SearchStrategy strategy = solver.getSearchStrategy();
        // Fail-first search: branch on the variable with the smallest remaining domain,
        // trying its minimum value first.
        strategy.setVarSelectorType(VarSelectorType.MIN_DOMAIN);
        strategy.setValueSelectorType(ValueSelectorType.MIN);

        Solution solution = solver.findOptimalSolution(Objective.MINIMIZE, p.getVar("Diff"));
        if (solution == null) {
            p.log("No solution");
        } else {
            solution.log();
            System.out.print(solution.getValue("A") + "" +
                    solution.getValue("B") + "" +
                    solution.getValue("C") + "" +
                    solution.getValue("D") + "" +
                    solution.getValue("E") + "" +
                    " - " +
                    solution.getValue("F") + "" +
                    solution.getValue("G") + "" +
                    solution.getValue("H") + "" +
                    solution.getValue("I") + "" +
                    solution.getValue("J") + "" +
                    " = ");
            System.out.println(solution.getValue("Diff"));
        }
        solver.logStats();
    }
}
| hakank/hakank | jsr_331/LeastDiff2.java |
1,479 | /*
* RED5 Open Source Media Server - https://github.com/Red5/ Copyright 2006-2023 by respective authors (see below). All rights reserved. Licensed under the Apache License, Version
* 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless
* required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
package org.red5.server.stream;
import static org.red5.server.net.rtmp.message.Constants.TYPE_AUDIO_DATA;
import static org.red5.server.net.rtmp.message.Constants.TYPE_INVOKE;
import static org.red5.server.net.rtmp.message.Constants.TYPE_NOTIFY;
import static org.red5.server.net.rtmp.message.Constants.TYPE_VIDEO_DATA;
import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.ref.WeakReference;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.StandardMBean;
import org.apache.commons.lang3.StringUtils;
import org.apache.mina.core.buffer.IoBuffer;
import org.red5.codec.IAudioStreamCodec;
import org.red5.codec.IStreamCodecInfo;
import org.red5.codec.IVideoStreamCodec;
import org.red5.codec.StreamCodecInfo;
import org.red5.io.amf.Output;
import org.red5.server.api.IConnection;
import org.red5.server.api.Red5;
import org.red5.server.api.event.IEvent;
import org.red5.server.api.event.IEventDispatcher;
import org.red5.server.api.event.IEventListener;
import org.red5.server.api.scope.IScope;
import org.red5.server.api.statistics.IClientBroadcastStreamStatistics;
import org.red5.server.api.statistics.support.StatisticsCounter;
import org.red5.server.api.stream.IClientBroadcastStream;
import org.red5.server.api.stream.IStreamAwareScopeHandler;
import org.red5.server.api.stream.IStreamCapableConnection;
import org.red5.server.api.stream.IStreamListener;
import org.red5.server.api.stream.IStreamPacket;
import org.red5.server.api.stream.StreamState;
import org.red5.server.jmx.mxbeans.ClientBroadcastStreamMXBean;
import org.red5.server.messaging.IConsumer;
import org.red5.server.messaging.IFilter;
import org.red5.server.messaging.IMessage;
import org.red5.server.messaging.IMessageComponent;
import org.red5.server.messaging.IMessageOutput;
import org.red5.server.messaging.IPipe;
import org.red5.server.messaging.IPipeConnectionListener;
import org.red5.server.messaging.IProvider;
import org.red5.server.messaging.IPushableConsumer;
import org.red5.server.messaging.OOBControlMessage;
import org.red5.server.messaging.PipeConnectionEvent;
import org.red5.server.net.rtmp.event.AudioData;
import org.red5.server.net.rtmp.event.IRTMPEvent;
import org.red5.server.net.rtmp.event.Notify;
import org.red5.server.net.rtmp.event.VideoData;
import org.red5.server.net.rtmp.message.Constants;
import org.red5.server.net.rtmp.message.Header;
import org.red5.server.net.rtmp.status.Status;
import org.red5.server.net.rtmp.status.StatusCodes;
import org.red5.server.stream.message.RTMPMessage;
import org.red5.server.stream.message.StatusMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jmx.export.annotation.ManagedResource;
/**
* Represents live stream broadcasted from client. As Flash Media Server, Red5 supports recording mode for live streams, that is,
* broadcasted stream has broadcast mode. It can be either "live" or "record" and latter causes server-side application to record
* broadcasted stream.
*
* Note that recorded streams are recorded as FLV files.
*
* This type of stream uses two different pipes for live streaming and recording.
*
* @author The Red5 Project
* @author Steven Gong
* @author Paul Gregoire ([email protected])
* @author Vladimir Hmelyoff ([email protected])
*/
@ManagedResource(objectName = "org.red5.server:type=ClientBroadcastStream", description = "ClientBroadcastStream")
public class ClientBroadcastStream extends AbstractClientStream implements IClientBroadcastStream, IFilter, IPushableConsumer, IPipeConnectionListener, IEventDispatcher, IClientBroadcastStreamStatistics, ClientBroadcastStreamMXBean {
    private static final Logger log = LoggerFactory.getLogger(ClientBroadcastStream.class);

    // cached once; changing the log level at runtime will not update this flag
    private static final boolean isDebug = log.isDebugEnabled();

    /**
     * Whether or not to automatically record the associated stream.
     */
    protected boolean automaticRecording;

    /**
     * Total number of bytes received.
     */
    protected volatile long bytesReceived;

    /**
     * Is there need to check video codec?
     */
    protected volatile boolean checkVideoCodec;

    /**
     * Is there need to check audio codec?
     */
    protected volatile boolean checkAudioCodec;

    /**
     * Data is sent by chunks, each of them has size
     */
    protected int chunkSize;

    /**
     * Is this stream still active? Set once via compareAndSet in close().
     */
    protected AtomicBoolean closed = new AtomicBoolean(false);

    /**
     * Output endpoint that providers use
     */
    protected transient IMessageOutput connMsgOut;

    /**
     * Stores timestamp of first packet (-1 until the first packet arrives)
     */
    protected long firstPacketTime = -1;

    /**
     * Pipe for live streaming
     */
    protected transient IPipe livePipe;

    /**
     * Stream published name
     */
    protected String publishedName;

    /**
     * Streaming parameters
     */
    protected Map<String, String> parameters;

    /**
     * Is there need to send start notification?
     */
    protected boolean sendStartNotification = true;

    /**
     * Stores statistics about subscribers.
     */
    private transient StatisticsCounter subscriberStats = new StatisticsCounter();

    /**
     * Listeners to get notified about received packets.
     */
    protected transient Set<IStreamListener> listeners = new CopyOnWriteArraySet<IStreamListener>();

    /**
     * Recording listener; weakly held, so the referent may be collected once recording ends
     */
    protected transient WeakReference<IRecordingListener> recordingListener;

    // highest event timestamp seen so far; -1 until the first timed event
    protected volatile long latestTimeStamp = -1;

    /**
     * Whether or not to register with JMX.
     */
    protected boolean registerJMX;

    /**
     * Stream name aliases for the entire server instance (static: shared by all streams).
     */
    protected static CopyOnWriteArraySet<String> localAliases = new CopyOnWriteArraySet<>();

    /**
     * Publish alias for the stream name.
     */
    protected String nameAlias;

    /**
     * Subscribe aliases for this instance.
     */
    protected CopyOnWriteArraySet<String> aliases;
/**
* Check and send notification if necessary
*
* @param event
* Event
*/
protected void checkSendNotifications(IEvent event) {
IEventListener source = event.getSource();
sendStartNotifications(source);
}
/**
* Closes stream, unsubscribes provides, sends stoppage notifications and broadcast close notification.
*/
public void close() {
//log.debug("Stream close: {}", publishedName);
if (closed.compareAndSet(false, true)) {
if (livePipe != null) {
livePipe.unsubscribe((IProvider) this);
}
// if we have a recording listener, inform that this stream is done
if (recordingListener != null) {
sendRecordStopNotify();
notifyRecordingStop();
// inform the listener to finish and close
recordingListener.get().stop();
}
sendPublishStopNotify();
// TODO: can we send the client something to make sure he stops sending data?
if (connMsgOut != null) {
connMsgOut.unsubscribe(this);
}
notifyBroadcastClose();
// clear the listener after all the notifications have been sent
if (recordingListener != null) {
recordingListener.clear();
}
// clear listeners
if (!listeners.isEmpty()) {
listeners.clear();
}
// deregister with jmx
unregisterJMX();
setState(StreamState.CLOSED);
// clear our aliases and from local registry
if (aliases != null) {
localAliases.removeAll(aliases);
aliases.clear();
}
// remove publish alias
if (nameAlias != null) {
localAliases.remove(nameAlias);
nameAlias = null;
}
}
}
    /**
     * Dispatches event. For stream control/data events: updates byte counters and codec info,
     * stores onMetaData notifies, pushes the event into the live pipe, and informs stream
     * listeners. Invoke events and all other event types are ignored. No-op once closed.
     *
     * @param event
     *            Event to dispatch
     */
    @SuppressWarnings("null")
    public void dispatchEvent(IEvent event) {
        if (event instanceof IRTMPEvent && !closed.get()) {
            switch (event.getType()) {
                case STREAM_CONTROL:
                case STREAM_DATA:
                    // create the event
                    final IRTMPEvent rtmpEvent;
                    try {
                        rtmpEvent = (IRTMPEvent) event;
                    } catch (ClassCastException e) {
                        log.error("Class cast exception in event dispatch", e);
                        return;
                    }
                    int eventTime = rtmpEvent.getTimestamp();
                    // verify and / or set source type
                    if (rtmpEvent.getSourceType() != Constants.SOURCE_TYPE_LIVE) {
                        rtmpEvent.setSourceType(Constants.SOURCE_TYPE_LIVE);
                    }
                    // get the buffer only once per call
                    // NOTE(review): buf stays null when the event is not IStreamData; the audio/video
                    // branches below dereference it, presumably those events always carry data — confirm
                    IoBuffer buf = null;
                    if (rtmpEvent instanceof IStreamData && (buf = ((IStreamData<?>) rtmpEvent).getData()) != null) {
                        bytesReceived += buf.limit();
                    }
                    // get stream codec
                    IStreamCodecInfo codecInfo = getCodecInfo();
                    StreamCodecInfo info = null;
                    if (codecInfo instanceof StreamCodecInfo) {
                        info = (StreamCodecInfo) codecInfo;
                    }
                    //log.trace("Stream codec info: {}", info);
                    switch (rtmpEvent.getDataType()) {
                        case TYPE_AUDIO_DATA: // AudioData
                            //log.trace("Audio: {}", eventTime);
                            IAudioStreamCodec audioStreamCodec = null;
                            if (checkAudioCodec) {
                                // dont try to read codec info from 0 length audio packets
                                if (buf.limit() > 0) {
                                    audioStreamCodec = AudioCodecFactory.getAudioCodec(buf);
                                    if (info != null) {
                                        info.setAudioCodec(audioStreamCodec);
                                    }
                                    // only detect the codec once, from the first non-empty packet
                                    checkAudioCodec = false;
                                }
                            } else if (codecInfo != null) {
                                audioStreamCodec = codecInfo.getAudioCodec();
                            }
                            if (audioStreamCodec != null) {
                                audioStreamCodec.addData(buf);
                            }
                            if (info != null) {
                                info.setHasAudio(true);
                            }
                            break;
                        case TYPE_VIDEO_DATA: // VideoData
                            //log.trace("Video: {}", eventTime);
                            IVideoStreamCodec videoStreamCodec = null;
                            if (checkVideoCodec) {
                                videoStreamCodec = VideoCodecFactory.getVideoCodec(buf);
                                if (info != null) {
                                    info.setVideoCodec(videoStreamCodec);
                                }
                                checkVideoCodec = false;
                            } else if (codecInfo != null) {
                                videoStreamCodec = codecInfo.getVideoCodec();
                            }
                            if (videoStreamCodec != null) {
                                videoStreamCodec.addData(buf, eventTime);
                            }
                            if (info != null) {
                                info.setHasVideo(true);
                            }
                            break;
                        case TYPE_NOTIFY:
                            Notify notifyEvent = (Notify) rtmpEvent;
                            String action = notifyEvent.getAction();
                            //if (isDebug) {
                            //log.debug("Notify action: {}", action);
                            //}
                            if ("onMetaData".equals(action)) {
                                // store the metadata
                                try {
                                    //log.debug("Setting metadata");
                                    // duplicate so the stored copy is independent of the live buffer
                                    setMetaData(notifyEvent.duplicate());
                                } catch (Exception e) {
                                    log.warn("Metadata could not be duplicated for this stream", e);
                                }
                            }
                            break;
                        case TYPE_INVOKE:
                            //Invoke invokeEvent = (Invoke) rtmpEvent;
                            //log.debug("Invoke action: {}", invokeEvent.getAction());
                            // event / stream listeners will not be notified of invokes
                            return;
                        default:
                            log.debug("Unknown: {}", rtmpEvent);
                    }
                    // update last event time
                    if (eventTime > latestTimeStamp) {
                        latestTimeStamp = eventTime;
                    }
                    // notify event listeners
                    checkSendNotifications(event);
                    // note this timestamp is set in event/body but not in the associated header
                    try {
                        // route to live
                        if (livePipe != null) {
                            // create new RTMP message, initialize it and push through pipe
                            RTMPMessage msg = RTMPMessage.build(rtmpEvent, eventTime);
                            livePipe.pushMessage(msg);
                        } else if (isDebug) {
                            log.debug("Live pipe was null, message was not pushed");
                        }
                    } catch (IOException err) {
                        // a failed push is treated as fatal for the stream
                        stop();
                    }
                    // notify listeners about received packet
                    if (rtmpEvent instanceof IStreamPacket) {
                        for (IStreamListener listener : getStreamListeners()) {
                            try {
                                listener.packetReceived(this, (IStreamPacket) rtmpEvent);
                            } catch (Exception e) {
                                log.warn("Error while notifying listener {}", listener, e);
                                if (listener instanceof RecordingListener) {
                                    sendRecordFailedNotify(e.getMessage());
                                }
                            }
                        }
                    }
                    break;
                default:
                    // ignored event
                    if (isDebug) {
                        log.debug("Ignoring event: {}", event.getType());
                    }
            }
        } else {
            if (isDebug) {
                log.debug("Event was of wrong type or stream is closed ({})", closed);
            }
        }
    }
    /** {@inheritDoc} */
    public int getActiveSubscribers() {
        // current (not cumulative) subscriber count
        return subscriberStats.getCurrent();
    }
    /** {@inheritDoc} */
    public long getBytesReceived() {
        // accumulated in dispatchEvent from each stream-data buffer's limit
        return bytesReceived;
    }
    /** {@inheritDoc} */
    public int getCurrentTimestamp() {
        // highest event timestamp seen so far; narrowed from long to the interface's int
        return (int) latestTimeStamp;
    }
    /** {@inheritDoc} */
    public int getMaxSubscribers() {
        // high-water mark of concurrent subscribers
        return subscriberStats.getMax();
    }
    /**
     * Getter for provider
     *
     * @return Provider (this stream acts as its own message provider)
     */
    public IProvider getProvider() {
        return this;
    }
/**
 * Setter for stream published name.
 *
 * @param name
 *            Name that used for publishing. Set at client side when begin to broadcast with NetStream#publish.
 *            Ignored when empty or the literal "false" (sent by clients when stopping a stream).
 */
public void setPublishedName(String name) {
    log.debug("setPublishedName: {}", name);
    // a publish name of "false" is a special case, used when stopping a stream
    if (StringUtils.isNotEmpty(name) && !"false".equals(name)) {
        this.publishedName = name;
        // (re)register the JMX MBean under the new name
        registerJMX();
    }
}
/**
 * Getter for published name.
 *
 * @return Stream published name, or null if never set
 */
public String getPublishedName() {
    return publishedName;
}
/** {@inheritDoc} */
public void setParameters(Map<String, String> params) {
    // stored by reference; callers retain ownership of the map
    this.parameters = params;
}
/** {@inheritDoc} */
public Map<String, String> getParameters() {
    // returns the internal map directly, not a defensive copy
    return parameters;
}
/** {@inheritDoc} */
public String getSaveFilename() {
    // recordingListener is a WeakReference; guard against both a null reference
    // and an already-collected referent to avoid an NPE
    if (recordingListener != null) {
        IRecordingListener listener = recordingListener.get();
        if (listener != null) {
            return listener.getFileName();
        }
    }
    return null;
}
/** {@inheritDoc} */
public IClientBroadcastStreamStatistics getStatistics() {
    // the stream implements its own statistics interface
    return this;
}
/** {@inheritDoc} */
public int getTotalSubscribers() {
    // cumulative subscriber count over the stream's lifetime
    return subscriberStats.getTotal();
}
/**
 * Whether this stream starts recording automatically when publishing begins.
 *
 * @return the automaticRecording
 */
public boolean isAutomaticRecording() {
    return automaticRecording;
}
/**
 * Enables or disables automatic recording on publish start.
 *
 * @param automaticRecording
 *            the automaticRecording to set
 */
public void setAutomaticRecording(boolean automaticRecording) {
    this.automaticRecording = automaticRecording;
}
/**
 * Enables or disables JMX MBean registration for this stream.
 *
 * @param registerJMX
 *            the registerJMX to set
 */
public void setRegisterJMX(boolean registerJMX) {
    this.registerJMX = registerJMX;
}
/**
 * Notifies the stream-aware scope handler, if one is present, that this
 * broadcast has closed. Handler failures are logged and suppressed.
 */
protected void notifyBroadcastClose() {
    final IStreamAwareScopeHandler handler = getStreamAwareHandler();
    if (handler == null) {
        return;
    }
    try {
        handler.streamBroadcastClose(this);
    } catch (Throwable t) {
        // a misbehaving handler must not disturb stream teardown
        log.error("Error in notifyBroadcastClose", t);
    }
}
/**
 * Notifies the stream-aware scope handler, if one is present, that recording
 * of this stream has stopped. Handler failures are logged and suppressed.
 */
protected void notifyRecordingStop() {
    final IStreamAwareScopeHandler handler = getStreamAwareHandler();
    if (handler == null) {
        return;
    }
    try {
        handler.streamRecordStop(this);
    } catch (Throwable t) {
        // a misbehaving handler must not disturb the stop sequence
        log.error("Error in notifyRecordingStop", t);
    }
}
/**
 * Notifies handler on stream broadcast start, then dispatches an onMetaData
 * notify carrying the stream's creation and start dates (ISO-8601, UTC).
 */
protected void notifyBroadcastStart() {
    IStreamAwareScopeHandler handler = getStreamAwareHandler();
    if (handler != null) {
        try {
            handler.streamBroadcastStart(this);
        } catch (Throwable t) {
            // handler errors are logged but do not prevent the metadata send below
            log.error("Error in notifyBroadcastStart", t);
        }
    }
    // send metadata for creation and start dates
    IoBuffer buf = IoBuffer.allocate(256);
    buf.setAutoExpand(true);
    // AMF-encode an onMetaData notify: action name followed by a parameter map
    Output out = new Output(buf);
    out.writeString("onMetaData");
    Map<Object, Object> params = new HashMap<>();
    Calendar cal = GregorianCalendar.getInstance();
    cal.setTimeInMillis(creationTime);
    params.put("creationdate", ZonedDateTime.ofInstant(cal.toInstant(), ZoneId.of("UTC")).format(DateTimeFormatter.ISO_INSTANT));
    cal.setTimeInMillis(startTime);
    params.put("startdate", ZonedDateTime.ofInstant(cal.toInstant(), ZoneId.of("UTC")).format(DateTimeFormatter.ISO_INSTANT));
    if (isDebug) {
        log.debug("Params: {}", params);
    }
    out.writeMap(params);
    buf.flip();
    Notify notify = new Notify(buf);
    notify.setAction("onMetaData");
    // header carries the metadata type; stream id 0 targets the control stream
    notify.setHeader(new Header());
    notify.getHeader().setDataType(Notify.TYPE_STREAM_METADATA);
    notify.getHeader().setStreamId(0);
    notify.setTimestamp(0);
    dispatchEvent(notify);
}
/**
 * Sends an out-of-band control message carrying the current chunk size to the
 * live pipe; a no-op when no chunk size is set or no pipe is connected.
 */
protected void notifyChunkSize() {
    if (chunkSize <= 0 || livePipe == null) {
        return;
    }
    OOBControlMessage message = new OOBControlMessage();
    message.setTarget("ConnectionConsumer");
    message.setServiceName("chunkSize");
    // lazily create the parameter map before populating it
    if (message.getServiceParamMap() == null) {
        message.setServiceParamMap(new HashMap<String, Object>());
    }
    message.getServiceParamMap().put("chunkSize", chunkSize);
    livePipe.sendOOBControlMessage(getProvider(), message);
}
/**
 * Out-of-band control message handler. Only messages targeted at
 * "ClientBroadcastStream" with service "chunkSize" are acted upon; everything
 * else is logged and ignored.
 *
 * @param source
 *            OOB message source
 * @param pipe
 *            Pipe that used to send OOB message
 * @param oobCtrlMsg
 *            Out-of-band control message
 */
public void onOOBControlMessage(IMessageComponent source, IPipe pipe, OOBControlMessage oobCtrlMsg) {
    String target = oobCtrlMsg.getTarget();
    if (!"ClientBroadcastStream".equals(target)) {
        log.debug("Unhandled OOB control message to target: {}", target);
        return;
    }
    String serviceName = oobCtrlMsg.getServiceName();
    if ("chunkSize".equals(serviceName)) {
        // adopt the new chunk size and propagate it to the live pipe
        chunkSize = (Integer) oobCtrlMsg.getServiceParamMap().get("chunkSize");
        notifyChunkSize();
    } else {
        log.debug("Unhandled OOB control message for service: {}", serviceName);
    }
}
/**
 * Pipe connection event handler; tracks the live pipe reference and
 * subscriber statistics as providers/consumers attach and detach.
 *
 * @param event
 *            Pipe connection event
 */
@SuppressWarnings("unused")
public void onPipeConnectionEvent(PipeConnectionEvent event) {
    switch (event.getType()) {
        case PROVIDER_CONNECT_PUSH:
            //log.debug("Provider connect");
            // adopt the pipe as the live pipe unless this is a record connection
            if (event.getProvider() == this && event.getSource() != connMsgOut && (event.getParamMap() == null || !event.getParamMap().containsKey("record"))) {
                livePipe = (IPipe) event.getSource();
                //log.debug("Provider: {}", livePipe.getClass().getName());
                // count each already-attached consumer; the loop variable itself is
                // intentionally unused (hence the @SuppressWarnings above)
                for (IConsumer consumer : livePipe.getConsumers()) {
                    subscriberStats.increment();
                }
            }
            break;
        case PROVIDER_DISCONNECT:
            //log.debug("Provider disconnect");
            //if (isDebug && livePipe != null) {
            //log.debug("Provider: {}", livePipe.getClass().getName());
            //}
            // only clear the reference if it is our live pipe that went away
            if (livePipe == event.getSource()) {
                livePipe = null;
            }
            break;
        case CONSUMER_CONNECT_PUSH:
            //log.debug("Consumer connect");
            IPipe pipe = (IPipe) event.getSource();
            //if (isDebug && pipe != null) {
            //log.debug("Consumer: {}", pipe.getClass().getName());
            //}
            // a consumer joining the live pipe needs the current chunk size
            if (livePipe == pipe) {
                notifyChunkSize();
            }
            subscriberStats.increment();
            break;
        case CONSUMER_DISCONNECT:
            //log.debug("Consumer disconnect: {}", event.getSource().getClass().getName());
            subscriberStats.decrement();
            break;
        default:
    }
}
/**
 * Currently not implemented; required by the consumer interface but this
 * stream never receives pushed messages through a pipe.
 *
 * @param pipe
 *            Pipe
 * @param message
 *            Message
 */
public void pushMessage(IPipe pipe, IMessage message) {
}
/**
 * Save broadcasted stream. Creates and starts a recording listener for this
 * stream; only one recording listener may be active at a time via this entry
 * point. Any existing codec decoder configurations (video/audio) are handed to
 * the file consumer before recording starts so the file begins decodable.
 *
 * @param name
 *            Stream name
 * @param isAppend
 *            Append mode
 * @throws IOException
 *             File could not be created/written to, or the client connection is gone
 */
public void saveAs(String name, boolean isAppend) throws IOException {
    //log.debug("SaveAs - name: {} append: {}", name, isAppend);
    // get connection to check if client is still streaming
    IStreamCapableConnection conn = getConnection();
    if (conn == null) {
        throw new IOException("Stream is no longer connected");
    }
    // one recording listener at a time via this entry point
    if (recordingListener == null) {
        // XXX Paul: Revisit this section to allow for implementation of custom IRecordingListener
        //IRecordingListener listener = (IRecordingListener) ScopeUtils.getScopeService(conn.getScope(), IRecordingListener.class, RecordingListener.class, false);
        // create a recording listener
        IRecordingListener listener = new RecordingListener();
        //log.debug("Created: {}", listener);
        // initialize the listener
        if (listener.init(conn, name, isAppend)) {
            // get decoder info if it exists for the stream
            IStreamCodecInfo codecInfo = getCodecInfo();
            //log.debug("Codec info: {}", codecInfo);
            if (codecInfo instanceof StreamCodecInfo) {
                StreamCodecInfo info = (StreamCodecInfo) codecInfo;
                IVideoStreamCodec videoCodec = info.getVideoCodec();
                //log.debug("Video codec: {}", videoCodec);
                if (videoCodec != null) {
                    //check for decoder configuration to send
                    IoBuffer config = videoCodec.getDecoderConfiguration();
                    if (config != null) {
                        //log.debug("Decoder configuration is available for {}", videoCodec.getName());
                        VideoData videoConf = new VideoData(config.asReadOnlyBuffer());
                        try {
                            //log.debug("Setting decoder configuration for recording");
                            listener.getFileConsumer().setVideoDecoderConfiguration(videoConf);
                        } finally {
                            // release the buffer wrapper regardless of consumer outcome
                            videoConf.release();
                        }
                    }
                } else {
                    log.debug("Could not initialize stream output, videoCodec is null.");
                }
                IAudioStreamCodec audioCodec = info.getAudioCodec();
                //log.debug("Audio codec: {}", audioCodec);
                if (audioCodec != null) {
                    //check for decoder configuration to send
                    IoBuffer config = audioCodec.getDecoderConfiguration();
                    if (config != null) {
                        //log.debug("Decoder configuration is available for {}", audioCodec.getName());
                        AudioData audioConf = new AudioData(config.asReadOnlyBuffer());
                        try {
                            //log.debug("Setting decoder configuration for recording");
                            listener.getFileConsumer().setAudioDecoderConfiguration(audioConf);
                        } finally {
                            // release the buffer wrapper regardless of consumer outcome
                            audioConf.release();
                        }
                    }
                } else {
                    log.debug("No decoder configuration available, audioCodec is null.");
                }
            }
            // set as primary listener; held weakly so it can be collected once detached
            recordingListener = new WeakReference<IRecordingListener>(listener);
            // add as a listener
            addStreamListener(listener);
            // start the listener thread
            listener.start();
        } else {
            log.warn("Recording listener failed to initialize for stream: {}", name);
        }
    } else {
        log.debug("Recording listener already exists for stream: {} auto record enabled: {}", name, automaticRecording);
    }
}
/**
 * Sends publish start notifications and moves the stream into the
 * PUBLISHING state.
 */
protected void sendPublishStartNotify() {
    // build the NetStream.Publish.Start status for this stream
    Status status = new Status(StatusCodes.NS_PUBLISH_START);
    status.setClientid(getStreamId());
    status.setDetails(getPublishedName());
    StatusMessage message = new StatusMessage();
    message.setBody(status);
    pushMessage(message);
    setState(StreamState.PUBLISHING);
}
/**
 * Sends publish stop notifications and moves the stream into the
 * STOPPED state.
 */
protected void sendPublishStopNotify() {
    // build the NetStream.Unpublish.Success status for this stream
    Status status = new Status(StatusCodes.NS_UNPUBLISHED_SUCCESS);
    status.setClientid(getStreamId());
    status.setDetails(getPublishedName());
    StatusMessage message = new StatusMessage();
    message.setBody(status);
    pushMessage(message);
    setState(StreamState.STOPPED);
}
/**
 * Sends record failed notifications at error level.
 *
 * @param reason human-readable failure description forwarded to the client
 */
protected void sendRecordFailedNotify(String reason) {
    Status failedStatus = new Status(StatusCodes.NS_RECORD_FAILED);
    failedStatus.setLevel(Status.ERROR);
    failedStatus.setClientid(getStreamId());
    failedStatus.setDetails(getPublishedName());
    // note: "setDesciption" is the (misspelled) name of the Status API method
    failedStatus.setDesciption(reason);
    StatusMessage failedMsg = new StatusMessage();
    failedMsg.setBody(failedStatus);
    pushMessage(failedMsg);
}
/**
 * Sends record start notifications.
 */
protected void sendRecordStartNotify() {
    // build the NetStream.Record.Start status for this stream
    Status status = new Status(StatusCodes.NS_RECORD_START);
    status.setClientid(getStreamId());
    status.setDetails(getPublishedName());
    StatusMessage message = new StatusMessage();
    message.setBody(status);
    pushMessage(message);
}
/**
 * Sends record stop notifications.
 */
protected void sendRecordStopNotify() {
    // build the NetStream.Record.Stop status for this stream
    Status status = new Status(StatusCodes.NS_RECORD_STOP);
    status.setClientid(getStreamId());
    status.setDetails(getPublishedName());
    StatusMessage message = new StatusMessage();
    message.setBody(status);
    pushMessage(message);
}
/**
 * Pushes a message out to a consumer; logs and swallows any I/O failure.
 *
 * @param msg
 *            StatusMessage
 */
protected void pushMessage(StatusMessage msg) {
    if (connMsgOut == null) {
        log.warn("Consumer message output is null");
        return;
    }
    try {
        connMsgOut.pushMessage(msg);
    } catch (IOException err) {
        log.error("Error while pushing message: {}", msg, err);
    }
}
/**
 * Sends the one-time start notifications for this stream: the scope handler
 * callback (record or publish start) followed by the publish/record status
 * messages and the broadcast-start handler notification. Subsequent calls are
 * no-ops until the flag is reset.
 *
 * @param source event source, expected to be the publishing connection
 */
protected void sendStartNotifications(IEventListener source) {
    if (sendStartNotification) {
        // notify handler that stream starts recording/publishing
        sendStartNotification = false;
        // recordingListener is a WeakReference; resolve it once, null-safely,
        // instead of calling get() unchecked in two places
        IRecordingListener recListener = recordingListener != null ? recordingListener.get() : null;
        boolean recording = recListener != null && recListener.isRecording();
        if (source instanceof IConnection) {
            IScope scope = ((IConnection) source).getScope();
            if (scope.hasHandler()) {
                final Object handler = scope.getHandler();
                if (handler instanceof IStreamAwareScopeHandler) {
                    if (recording) {
                        // callback for record start
                        ((IStreamAwareScopeHandler) handler).streamRecordStart(this);
                    } else {
                        // live (non-record) publish; FMS-style deletion of previously
                        // recorded files was considered here but never enabled
                        // callback for publish start
                        ((IStreamAwareScopeHandler) handler).streamPublishStart(this);
                    }
                }
            }
        }
        // send start notifications
        sendPublishStartNotify();
        if (recording) {
            sendRecordStartNotify();
        }
        notifyBroadcastStart();
    }
}
/**
 * Starts stream, creates pipes, connects. Resets codec checks and counters,
 * obtains the connection message output from the consumer service, and
 * subscribes as provider before moving to the STARTED state.
 */
public void start() {
    //log.info("Stream start: {}", publishedName);
    // force codec (re)detection on the next packets
    checkVideoCodec = true;
    checkAudioCodec = true;
    firstPacketTime = -1;
    latestTimeStamp = -1;
    bytesReceived = 0;
    // resolve the consumer service from the scope context to get our output pipe
    IConsumerService consumerManager = (IConsumerService) getScope().getContext().getBean(IConsumerService.KEY);
    connMsgOut = consumerManager.getConsumerOutput(this);
    if (connMsgOut != null && connMsgOut.subscribe(this, null)) {
        // technically this would be a 'start' time
        startTime = System.currentTimeMillis();
    } else {
        log.warn("Subscribe failed");
    }
    setState(StreamState.STARTED);
}
/** {@inheritDoc} */
public void startPublishing() {
    // We send the start messages before the first packet is received.
    // This is required so FME actually starts publishing.
    sendStartNotifications(Red5.getConnectionLocal());
    // force recording if set
    if (automaticRecording) {
        //log.debug("Starting automatic recording of {}", publishedName);
        try {
            // non-append mode: a failure here should not abort publishing
            saveAs(publishedName, false);
        } catch (Exception e) {
            log.warn("Start of automatic recording failed", e);
        }
    }
}
/** {@inheritDoc} */
public void stop() {
    //log.info("Stream stop: {}", publishedName);
    // transition state first so listeners observe STOPPED during teardown
    setState(StreamState.STOPPED);
    stopRecording();
    close();
}
/**
 * Stops any currently active recording; a no-op when no recording listener is
 * set or recording is not in progress.
 */
public void stopRecording() {
    // recordingListener is a WeakReference, so the referent may already be
    // collected; the original chained get().isRecording() could NPE here
    IRecordingListener listener = recordingListener != null ? recordingListener.get() : null;
    if (listener != null && listener.isRecording()) {
        sendRecordStopNotify();
        notifyRecordingStop();
        // remove the listener
        removeStreamListener(listener);
        // stop the recording listener
        listener.stop();
        // clear and null-out the reference
        recordingListener.clear();
        recordingListener = null;
    }
}
/**
 * Whether this stream currently has an active recording.
 *
 * @return true if a live recording listener exists and reports recording
 */
public boolean isRecording() {
    // null-safe: the WeakReference referent may have been collected, in which
    // case get() returns null and the original code would NPE
    IRecordingListener listener = recordingListener != null ? recordingListener.get() : null;
    return listener != null && listener.isRecording();
}
/** {@inheritDoc} */
public void addStreamListener(IStreamListener listener) {
    listeners.add(listener);
}
/** {@inheritDoc} */
public Collection<IStreamListener> getStreamListeners() {
    // returns the live collection, not a snapshot
    return listeners;
}
/** {@inheritDoc} */
public void removeStreamListener(IStreamListener listener) {
    listeners.remove(listener);
}
/**
 * Get the file we'd be recording to based on scope and given name; delegates
 * to {@link RecordingListener}.
 *
 * @param scope
 *            scope
 * @param name
 *            record name
 * @return file
 */
protected File getRecordFile(IScope scope, String name) {
    return RecordingListener.getRecordFile(scope, name);
}
/**
 * Registers this stream as a JMX MBean when registration is enabled and a
 * valid publish name is set. Duplicate registrations are logged at debug.
 */
protected void registerJMX() {
    if (registerJMX && StringUtils.isNotEmpty(publishedName) && !"false".equals(publishedName)) {
        // register with jmx
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        try {
            // replace any colons with pipes as they are invalid characters for jmx object names;
            // use the literal char replace rather than regex-based replaceAll
            ObjectName oName = new ObjectName(String.format("org.red5.server:type=ClientBroadcastStream,scope=%s,publishedName=%s", getScope().getName(), publishedName.replace(':', '|')));
            mbs.registerMBean(new StandardMBean(this, ClientBroadcastStreamMXBean.class, true), oName);
        } catch (InstanceAlreadyExistsException e) {
            log.debug("Instance already registered", e);
        } catch (Exception e) {
            log.warn("Error on jmx registration", e);
        }
    }
}
/**
 * Unregisters this stream's JMX MBean, if registration is enabled and a valid
 * publish name was set (mirrors the guard in registerJMX).
 */
protected void unregisterJMX() {
    // single combined guard, consistent with registerJMX
    if (registerJMX && StringUtils.isNotEmpty(publishedName) && !"false".equals(publishedName)) {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        try {
            // replace any colons with pipes as they are invalid characters for jmx object names;
            // use the literal char replace rather than regex-based replaceAll
            ObjectName oName = new ObjectName(String.format("org.red5.server:type=ClientBroadcastStream,scope=%s,publishedName=%s", getScope().getName(), publishedName.replace(':', '|')));
            mbs.unregisterMBean(oName);
        } catch (Exception e) {
            log.warn("Exception unregistering", e);
        }
    }
}
@Override
public boolean addAlias(String alias) {
    log.debug("Adding alias: {}", alias);
    // lazily create the backing set on first use
    if (aliases == null) {
        aliases = new CopyOnWriteArraySet<>();
    }
    // reject aliases already claimed in the local registry, then attempt the add
    return !localAliases.contains(alias) && aliases.add(alias);
}
@Override
public boolean hasAlias() {
    // true only when the set exists and holds at least one alias
    return aliases != null && !aliases.isEmpty();
}
@Override
public String getAlias() {
    String alias = null;
    if (hasAlias()) {
        int bound = aliases.size();
        // pick a random position when more than one alias is registered
        int index = bound > 1 ? ThreadLocalRandom.current().nextInt(bound) : 0;
        // use orElse(null) rather than get(): the set can shrink concurrently
        // between the size() call and the stream snapshot, in which case
        // skip(index).findFirst() may be empty and get() would throw
        alias = aliases.stream().skip(index).findFirst().orElse(null);
        log.debug("Returning alias: {}", alias);
    }
    return alias;
}
@Override
public boolean containsAlias(String alias) {
    // an absent or empty set cannot contain anything
    return aliases != null && aliases.contains(alias);
}
@Override
public Set<String> getAliases() {
    // never return null; callers get an immutable empty set instead
    return aliases != null ? aliases : Collections.emptySet();
}
@Override
public void setNameAlias(String nameAlias) {
    // remove any existing registration
    // NOTE(review): the old alias is removed from the shared local registry and the
    // event is logged as a "hijack" — presumably another publisher may claim it; verify
    if (this.nameAlias != null && nameAlias != null) {
        if (localAliases.remove(this.nameAlias)) {
            log.warn("Publish name: {} has hijacked previously registered alias", nameAlias);
        }
    }
    // this will overwrite any existing value
    this.nameAlias = nameAlias;
}
@Override
public String getNameAlias() {
    // may be null when no alias has been set
    return nameAlias;
}
@Override
public boolean aliasRegistered(String alias) {
    // consults the shared local registry, not this stream's own alias set
    return localAliases.contains(alias);
}
}
| Red5/red5-server | common/src/main/java/org/red5/server/stream/ClientBroadcastStream.java |
1,480 | package mage.sets;
import mage.cards.Card;
import mage.cards.ExpansionSet;
import mage.cards.repository.CardInfo;
import mage.constants.Rarity;
import mage.constants.SetType;
import mage.util.RandomUtil;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* https://mtg.gamepedia.com/Mystery_Booster
* https://magic.wizards.com/en/articles/archive/feature/unraveling-mystery-booster-2019-11-14
* Print sheets used for booster construction sourced from http://www.lethe.xyz/mtg/collation/mb1.html
* <p>
* This set has a very special booster layout: Each slot draws from its own distinct print sheet and
* all cards have an equal probability. Therefore, this class implements booster construction by using
* a list of cards for each slot in the booster.
*
* @author TheElk801, xenohedron
*/
public class MysteryBooster extends ExpansionSet {
// eagerly created singleton instance of this expansion set
private static final MysteryBooster instance = new MysteryBooster();

/**
 * @return the singleton instance of the Mystery Booster set
 */
public static MysteryBooster getInstance() {
    return instance;
}
/**
 * This map defines which cards can go into which booster slot (keys 1-15).
 * Will be populated when the first booster is requested.
 * <p>
 * Of note, this is booster collation for MB1, the 2019 Mystery Booster.
 * In the 2021 versions (not collated here), there are 1692 cards in common, and two changes:
 * - Goblin Trenches added instead of Selesnya Guildmage (See Scryfall note on https://scryfall.com/card/mb1/1695/goblin-trenches )
 * - Prophetic Bolt added instead of Nucklavee (See Scryfall note on https://scryfall.com/card/mb1/1696/prophetic-bolt )
 */
protected final Map<Integer, List<CardInfo>> boosterMap = new HashMap<>();

// private: instances are obtained via getInstance()
private MysteryBooster() {
    super("Mystery Booster", "MB1", ExpansionSet.buildDate(2019, 11, 7), SetType.REMIX);
    this.hasBoosters = true;
    this.hasBasicLands = false;
    // This set is implemented for the purpose of booster generation only.
    // Rather than include cards (scryfall moved them to PLST with thousands of other cards) with duplicate printings,
    // the collation just draws from the original printings in other sets.
    // However, this set would be excluded from view if it contained no cards, so these two cards are here as a workaround.
    cards.add(new SetCardInfo("Goblin Trenches", 203, Rarity.RARE, mage.cards.g.GoblinTrenches.class));
    cards.add(new SetCardInfo("Prophetic Bolt", 231, Rarity.RARE, mage.cards.p.PropheticBolt.class));
}
/**
 * Builds a 15-card booster by drawing one card, uniformly at random, from each
 * slot's dedicated print sheet. The sheets are populated lazily on first call.
 */
@Override
public List<Card> createBooster() {
    synchronized (boosterMap) {
        // init the map only once, on first booster gen
        if (boosterMap.isEmpty()) {
            initBoosterMap();
        }
    }
    final List<Card> booster = new ArrayList<>(15);
    for (int slot = 1; slot <= 15; ++slot) {
        final List<CardInfo> sheet = boosterMap.get(slot);
        // every card on a sheet has equal probability
        final CardInfo picked = sheet.get(RandomUtil.nextInt(sheet.size()));
        booster.add(picked.createCard());
    }
    return booster;
}
// Populates one print-sheet list per booster slot (1-15); each addSlotN method
// fills the sheet for its slot with the exact MB1 collation.
private void initBoosterMap() {
    for (int i = 1; i < 16; ++i) {
        // 121 is the typical sheet size, used as the initial capacity
        this.boosterMap.put(i, new ArrayList<>(121));
    }
    addSlot1whiteA(boosterMap.get(1));
    addSlot2whiteB(boosterMap.get(2));
    addSlot3blueA(boosterMap.get(3));
    addSlot4blueB(boosterMap.get(4));
    addSlot5blackA(boosterMap.get(5));
    addSlot6blackB(boosterMap.get(6));
    addSlot7redA(boosterMap.get(7));
    addSlot8redB(boosterMap.get(8));
    addSlot9greenA(boosterMap.get(9));
    addSlot10greenB(boosterMap.get(10));
    addSlot11multicolored(boosterMap.get(11));
    addSlot12colorless(boosterMap.get(12));
    addSlot13oldFrame(boosterMap.get(13));
    addSlot14rare(boosterMap.get(14));
    addSlot15foil(boosterMap.get(15));
}
// Print sheet for booster slot 1 (white, sheet A). Pure data: each entry is
// (card name, set code, collector number) referencing an existing printing.
private void addSlot1whiteA(List<CardInfo> boosterList) {
    addCardInfoToList(boosterList, "Abzan Falconer", "KTK", "2");
    addCardInfoToList(boosterList, "Abzan Runemark", "FRF", "3");
    addCardInfoToList(boosterList, "Acrobatic Maneuver", "KLD", "1");
    addCardInfoToList(boosterList, "Affa Protector", "OGW", "14");
    addCardInfoToList(boosterList, "Ainok Bond-Kin", "KTK", "3");
    addCardInfoToList(boosterList, "Alley Evasion", "AER", "6");
    addCardInfoToList(boosterList, "Angelic Purge", "SOI", "3");
    addCardInfoToList(boosterList, "Angelsong", "DDC", "15");
    addCardInfoToList(boosterList, "Apostle's Blessing", "MM2", "8");
    addCardInfoToList(boosterList, "Arrester's Zeal", "RNA", "4");
    addCardInfoToList(boosterList, "Artful Maneuver", "DTK", "4");
    addCardInfoToList(boosterList, "Aura of Silence", "C15", "60");
    addCardInfoToList(boosterList, "Bartered Cow", "ELD", "6");
    addCardInfoToList(boosterList, "Bonds of Faith", "DDQ", "2");
    addCardInfoToList(boosterList, "Borrowed Grace", "EMN", "14");
    addCardInfoToList(boosterList, "Bulwark Giant", "WAR", "7");
    addCardInfoToList(boosterList, "Caravan Escort", "DDP", "3");
    addCardInfoToList(boosterList, "Caught in the Brights", "AER", "10");
    addCardInfoToList(boosterList, "Celestial Crusader", "C14", "68");
    addCardInfoToList(boosterList, "Celestial Flare", "ORI", "8");
    addCardInfoToList(boosterList, "Center Soul", "DTK", "8");
    addCardInfoToList(boosterList, "Cliffside Lookout", "BFZ", "20");
    addCardInfoToList(boosterList, "Conviction", "AER", "12");
    addCardInfoToList(boosterList, "Countless Gears Renegade", "AER", "13");
    addCardInfoToList(boosterList, "Court Street Denizen", "DDO", "5");
    addCardInfoToList(boosterList, "Crib Swap", "C18", "65");
    addCardInfoToList(boosterList, "Danitha Capashen, Paragon", "DOM", "12");
    addCardInfoToList(boosterList, "Daring Skyjek", "GK1", "79");
    addCardInfoToList(boosterList, "Decommission", "AER", "16");
    addCardInfoToList(boosterList, "Defiant Strike", "WAR", "9");
    addCardInfoToList(boosterList, "Desperate Sentry", "EMN", "21");
    addCardInfoToList(boosterList, "Devilthorn Fox", "SOI", "14");
    addCardInfoToList(boosterList, "Disposal Mummy", "HOU", "9");
    addCardInfoToList(boosterList, "Divine Favor", "M15", "10");
    addCardInfoToList(boosterList, "Dragon's Eye Sentry", "DTK", "11");
    addCardInfoToList(boosterList, "Dragon's Presence", "GS1", "16");
    addCardInfoToList(boosterList, "Eddytrail Hawk", "KLD", "12");
    addCardInfoToList(boosterList, "Enduring Victory", "DTK", "16");
    addCardInfoToList(boosterList, "Enlightened Ascetic", "ORI", "12");
    addCardInfoToList(boosterList, "Ephemeral Shields", "M15", "11");
    addCardInfoToList(boosterList, "Ephemerate", "MH1", "7");
    addCardInfoToList(boosterList, "Excoriate", "E01", "5");
    addCardInfoToList(boosterList, "Expose Evil", "SOI", "19");
    addCardInfoToList(boosterList, "Eyes in the Skies", "MM3", "5");
    addCardInfoToList(boosterList, "Faith's Fetters", "UMA", "16");
    addCardInfoToList(boosterList, "Feat of Resistance", "KTK", "10");
    addCardInfoToList(boosterList, "Felidar Umbra", "PCA", "6");
    addCardInfoToList(boosterList, "Firehoof Cavalry", "KTK", "11");
    addCardInfoToList(boosterList, "Ghostblade Eidolon", "C15", "70");
    addCardInfoToList(boosterList, "Gift of Estates", "C14", "73");
    addCardInfoToList(boosterList, "Glaring Aegis", "DTK", "18");
    addCardInfoToList(boosterList, "Glint-Sleeve Artisan", "KLD", "17");
    addCardInfoToList(boosterList, "God-Pharaoh's Faithful", "HOU", "14");
    addCardInfoToList(boosterList, "Grasp of the Hieromancer", "E01", "13");
    addCardInfoToList(boosterList, "Gust Walker", "AKH", "17");
    addCardInfoToList(boosterList, "Gustcloak Skirmisher", "DDO", "13");
    addCardInfoToList(boosterList, "Healing Hands", "ORI", "17");
    addCardInfoToList(boosterList, "Hyena Umbra", "UMA", "21");
    addCardInfoToList(boosterList, "Infantry Veteran", "DDN", "3");
    addCardInfoToList(boosterList, "Inquisitor's Ox", "SOI", "24");
    addCardInfoToList(boosterList, "Isolation Zone", "OGW", "22");
    addCardInfoToList(boosterList, "Knight of Old Benalia", "MH1", "17");
    addCardInfoToList(boosterList, "Knight of Sorrows", "RNA", "14");
    addCardInfoToList(boosterList, "Kor Skyfisher", "DDO", "16");
    addCardInfoToList(boosterList, "Leonin Relic-Warder", "C17", "65");
    addCardInfoToList(boosterList, "Lightform", "C18", "68");
    addCardInfoToList(boosterList, "Lone Missionary", "DDN", "49");
    addCardInfoToList(boosterList, "Lonesome Unicorn", "ELD", "21");
    addCardInfoToList(boosterList, "Lotus-Eye Mystics", "UMA", "23");
    addCardInfoToList(boosterList, "Loxodon Partisan", "DDO", "17");
    addCardInfoToList(boosterList, "Mardu Hordechief", "KTK", "17");
    addCardInfoToList(boosterList, "Marked by Honor", "M15", "17");
    addCardInfoToList(boosterList, "Meditation Puzzle", "M15", "19");
    addCardInfoToList(boosterList, "Mortal's Ardor", "DDO", "19");
    addCardInfoToList(boosterList, "Mother of Runes", "DDO", "20");
    addCardInfoToList(boosterList, "Ninth Bridge Patrol", "KLD", "22");
    addCardInfoToList(boosterList, "Ondu Greathorn", "BFZ", "40");
    addCardInfoToList(boosterList, "Ondu War Cleric", "OGW", "31");
    addCardInfoToList(boosterList, "Oreskos Swiftclaw", "M15", "22");
    addCardInfoToList(boosterList, "Oust", "DDP", "7");
    addCardInfoToList(boosterList, "Palace Jailer", "CN2", "18");
    addCardInfoToList(boosterList, "Path to Exile", "E02", "3");
    addCardInfoToList(boosterList, "Peace of Mind", "EMN", "36");
    addCardInfoToList(boosterList, "Prowling Caracal", "RNA", "17");
    addCardInfoToList(boosterList, "Resurrection", "UMA", "30");
    addCardInfoToList(boosterList, "Rhet-Crop Spearmaster", "AKH", "26");
    addCardInfoToList(boosterList, "Righteous Cause", "CMA", "21");
    addCardInfoToList(boosterList, "Savannah Lions", "A25", "33");
    addCardInfoToList(boosterList, "Searing Light", "OGW", "33");
    addCardInfoToList(boosterList, "Serra's Embrace", "DDC", "21");
    addCardInfoToList(boosterList, "Sheer Drop", "BFZ", "48");
    addCardInfoToList(boosterList, "Shining Aerosaur", "XLN", "36");
    addCardInfoToList(boosterList, "Shining Armor", "ELD", "29");
    addCardInfoToList(boosterList, "Siegecraft", "KTK", "23");
    addCardInfoToList(boosterList, "Skymarcher Aspirant", "RIX", "21");
    addCardInfoToList(boosterList, "Skyspear Cavalry", "UMA", "36");
    addCardInfoToList(boosterList, "Snubhorn Sentry", "RIX", "23");
    addCardInfoToList(boosterList, "Soul Parry", "DDO", "24");
    addCardInfoToList(boosterList, "Soul Summons", "FRF", "26");
    addCardInfoToList(boosterList, "Soul-Strike Technique", "MH1", "30");
    addCardInfoToList(boosterList, "Soulmender", "M15", "35");
    addCardInfoToList(boosterList, "Sparring Mummy", "AKH", "29");
    addCardInfoToList(boosterList, "Spectral Gateguards", "DDQ", "19");
    addCardInfoToList(boosterList, "Stave Off", "DDN", "61");
    addCardInfoToList(boosterList, "Steadfast Sentinel", "HOU", "24");
    addCardInfoToList(boosterList, "Stone Haven Medic", "BFZ", "51");
    addCardInfoToList(boosterList, "Suppression Bonds", "ORI", "34");
    addCardInfoToList(boosterList, "Survive the Night", "SOI", "41");
    addCardInfoToList(boosterList, "Territorial Hammerskull", "XLN", "41");
    addCardInfoToList(boosterList, "Thraben Inspector", "SOI", "44");
    addCardInfoToList(boosterList, "Thraben Standard Bearer", "EMN", "48");
    addCardInfoToList(boosterList, "Topan Freeblade", "ORI", "36");
    addCardInfoToList(boosterList, "Veteran Swordsmith", "DDO", "28");
    addCardInfoToList(boosterList, "Village Bell-Ringer", "DDQ", "22");
    addCardInfoToList(boosterList, "Voice of the Provinces", "DDQ", "23");
    addCardInfoToList(boosterList, "Wall of One Thousand Cuts", "MH1", "36");
    addCardInfoToList(boosterList, "Wandering Champion", "UMA", "42");
    addCardInfoToList(boosterList, "War Behemoth", "KTK", "29");
    addCardInfoToList(boosterList, "Windborne Charge", "CN2", "100");
    addCardInfoToList(boosterList, "Wing Shards", "IMA", "38");
    addCardInfoToList(boosterList, "Winged Shepherd", "AKH", "39");
}
/**
 * Appends the card pool for booster slot 2, white group B, to {@code boosterList}.
 * Each call registers one card as (card name, set code, collector number);
 * the set code / collector number pair pins the exact printing used for this slot.
 * Entries are kept in alphabetical order by card name.
 *
 * @param boosterList the list that receives the card entries for this slot
 */
private void addSlot2whiteB(List<CardInfo> boosterList) {
addCardInfoToList(boosterList, "Adanto Vanguard", "XLN", "1");
addCardInfoToList(boosterList, "Ajani's Pridemate", "WAR", "4");
addCardInfoToList(boosterList, "Angel of Mercy", "IMA", "6");
addCardInfoToList(boosterList, "Angel of Renewal", "BFZ", "18");
addCardInfoToList(boosterList, "Angelic Gift", "BBD", "88");
addCardInfoToList(boosterList, "Arrest", "MM2", "9");
addCardInfoToList(boosterList, "Aven Battle Priest", "ORI", "6");
addCardInfoToList(boosterList, "Aven Sentry", "DOM", "3");
addCardInfoToList(boosterList, "Ballynock Cohort", "EMA", "3");
addCardInfoToList(boosterList, "Battle Mastery", "BBD", "89");
addCardInfoToList(boosterList, "Benevolent Ancestor", "IMA", "12");
addCardInfoToList(boosterList, "Blade Instructor", "GRN", "1");
addCardInfoToList(boosterList, "Blessed Spirits", "ORI", "7");
addCardInfoToList(boosterList, "Built to Last", "KLD", "7");
addCardInfoToList(boosterList, "Candlelight Vigil", "GRN", "3");
addCardInfoToList(boosterList, "Cartouche of Solidarity", "AKH", "7");
addCardInfoToList(boosterList, "Cast Out", "AKH", "8");
addCardInfoToList(boosterList, "Cathar's Companion", "SOI", "9");
addCardInfoToList(boosterList, "Champion of Arashin", "DTK", "9");
addCardInfoToList(boosterList, "Charge", "DOM", "10");
addCardInfoToList(boosterList, "Cloudshift", "A25", "7");
addCardInfoToList(boosterList, "Coalition Honor Guard", "EMA", "6");
addCardInfoToList(boosterList, "Collar the Culprit", "GRN", "5");
addCardInfoToList(boosterList, "Congregate", "M15", "6");
addCardInfoToList(boosterList, "Court Homunculus", "MM2", "13");
addCardInfoToList(boosterList, "Darksteel Mutation", "CMA", "9");
addCardInfoToList(boosterList, "Dauntless Cathar", "SOI", "11");
addCardInfoToList(boosterList, "Dawnglare Invoker", "C15", "67");
addCardInfoToList(boosterList, "Disenchant", "M20", "14");
addCardInfoToList(boosterList, "Dismantling Blow", "MH1", "5");
addCardInfoToList(boosterList, "Djeru's Renunciation", "HOU", "11");
addCardInfoToList(boosterList, "Djeru's Resolve", "AKH", "11");
addCardInfoToList(boosterList, "Doomed Traveler", "CN2", "83");
addCardInfoToList(boosterList, "Dragon Bell Monk", "IMA", "17");
addCardInfoToList(boosterList, "Emerge Unscathed", "IMA", "19");
addCardInfoToList(boosterList, "Encampment Keeper", "XLN", "11");
addCardInfoToList(boosterList, "Encircling Fissure", "BFZ", "23");
addCardInfoToList(boosterList, "Excavation Elephant", "DOM", "17");
addCardInfoToList(boosterList, "Expedition Raptor", "BBD", "92");
addCardInfoToList(boosterList, "Exultant Skymarcher", "RIX", "7");
addCardInfoToList(boosterList, "Faithbearer Paladin", "EMN", "25");
addCardInfoToList(boosterList, "Felidar Guardian", "AER", "19");
addCardInfoToList(boosterList, "Fencing Ace", "A25", "13");
addCardInfoToList(boosterList, "Fiend Hunter", "CMA", "10");
addCardInfoToList(boosterList, "Forsake the Worldly", "AKH", "13");
addCardInfoToList(boosterList, "Fortify", "MM2", "17");
addCardInfoToList(boosterList, "Fragmentize", "KLD", "14");
addCardInfoToList(boosterList, "Geist of the Moors", "A25", "15");
addCardInfoToList(boosterList, "Gideon's Lawkeeper", "MM3", "7");
addCardInfoToList(boosterList, "Gleam of Resistance", "CN2", "87");
addCardInfoToList(boosterList, "Gods Willing", "UMA", "18");
addCardInfoToList(boosterList, "Great-Horn Krushok", "FRF", "13");
addCardInfoToList(boosterList, "Guided Strike", "IMA", "23");
addCardInfoToList(boosterList, "Healer's Hawk", "GRN", "14");
addCardInfoToList(boosterList, "Healing Grace", "DOM", "20");
addCardInfoToList(boosterList, "Heavy Infantry", "ORI", "18");
addCardInfoToList(boosterList, "Humble", "EMA", "14");
addCardInfoToList(boosterList, "Inspired Charge", "M19", "15");
addCardInfoToList(boosterList, "Intrusive Packbeast", "GRN", "17");
addCardInfoToList(boosterList, "Iona's Judgment", "IMA", "25");
addCardInfoToList(boosterList, "Jubilant Mascot", "BBD", "28");
addCardInfoToList(boosterList, "Knight of Cliffhaven", "DDP", "5");
addCardInfoToList(boosterList, "Knight of the Skyward Eye", "A25", "19");
addCardInfoToList(boosterList, "Knight of the Tusk", "M19", "18");
addCardInfoToList(boosterList, "Kor Bladewhirl", "BFZ", "34");
addCardInfoToList(boosterList, "Kor Firewalker", "A25", "21");
addCardInfoToList(boosterList, "Kor Hookmaster", "EMA", "18");
addCardInfoToList(boosterList, "Kor Sky Climber", "OGW", "24");
addCardInfoToList(boosterList, "Lieutenants of the Guard", "CN2", "16");
addCardInfoToList(boosterList, "Lightwalker", "BBD", "95");
addCardInfoToList(boosterList, "Lingering Souls", "MM3", "12");
addCardInfoToList(boosterList, "Looming Altisaur", "XLN", "23");
addCardInfoToList(boosterList, "Loyal Sentry", "A25", "22");
addCardInfoToList(boosterList, "Lunarch Mantle", "A25", "24");
addCardInfoToList(boosterList, "Midnight Guard", "BBD", "99");
addCardInfoToList(boosterList, "Momentary Blink", "MM3", "16");
addCardInfoToList(boosterList, "Moonlit Strider", "MM2", "27");
addCardInfoToList(boosterList, "Nyx-Fleece Ram", "A25", "26");
addCardInfoToList(boosterList, "Pacifism", "M20", "32");
addCardInfoToList(boosterList, "Palace Sentinels", "CN2", "19");
addCardInfoToList(boosterList, "Paladin of the Bloodstained", "XLN", "25");
addCardInfoToList(boosterList, "Path of Peace", "A25", "29");
addCardInfoToList(boosterList, "Pegasus Courser", "M19", "32");
addCardInfoToList(boosterList, "Pentarch Ward", "IMA", "27");
addCardInfoToList(boosterList, "Pitfall Trap", "MM3", "18");
addCardInfoToList(boosterList, "Pressure Point", "FRF", "21");
addCardInfoToList(boosterList, "Promise of Bunrei", "A25", "30");
addCardInfoToList(boosterList, "Rally the Peasants", "EMA", "25");
addCardInfoToList(boosterList, "Raptor Companion", "RIX", "19");
addCardInfoToList(boosterList, "Refurbish", "KLD", "25");
addCardInfoToList(boosterList, "Renewed Faith", "A25", "31");
addCardInfoToList(boosterList, "Retreat to Emeria", "BFZ", "44");
addCardInfoToList(boosterList, "Reviving Dose", "CN2", "97");
addCardInfoToList(boosterList, "Rootborn Defenses", "MM3", "21");
addCardInfoToList(boosterList, "Sacred Cat", "AKH", "27");
addCardInfoToList(boosterList, "Sanctum Gargoyle", "C16", "76");
addCardInfoToList(boosterList, "Sandstorm Charger", "DTK", "34");
addCardInfoToList(boosterList, "Seal of Cleansing", "EMA", "26");
addCardInfoToList(boosterList, "Seeker of the Way", "KTK", "22");
addCardInfoToList(boosterList, "Sensor Splicer", "MM3", "23");
addCardInfoToList(boosterList, "Seraph of the Suns", "AKH", "28");
addCardInfoToList(boosterList, "Serra Disciple", "DOM", "34");
addCardInfoToList(boosterList, "Shoulder to Shoulder", "BBD", "105");
addCardInfoToList(boosterList, "Silverchase Fox", "BBD", "106");
addCardInfoToList(boosterList, "Skyhunter Skirmisher", "MM2", "32");
addCardInfoToList(boosterList, "Slash of Talons", "XLN", "38");
addCardInfoToList(boosterList, "Soul Warden", "MM3", "24");
addCardInfoToList(boosterList, "Stalwart Aven", "IMA", "32");
addCardInfoToList(boosterList, "Star-Crowned Stag", "M19", "38");
addCardInfoToList(boosterList, "Sunlance", "MM2", "34");
addCardInfoToList(boosterList, "Sunrise Seeker", "XLN", "40");
addCardInfoToList(boosterList, "Swords to Plowshares", "C16", "78");
addCardInfoToList(boosterList, "Take Vengeance", "M19", "40");
addCardInfoToList(boosterList, "Tandem Tactics", "BBD", "112");
addCardInfoToList(boosterList, "Terashi's Grasp", "MM2", "37");
addCardInfoToList(boosterList, "Unwavering Initiate", "AKH", "36");
addCardInfoToList(boosterList, "Wake the Reflections", "MM3", "28");
addCardInfoToList(boosterList, "Wall of Omens", "EMA", "34");
addCardInfoToList(boosterList, "Wild Griffin", "CN2", "99");
addCardInfoToList(boosterList, "Youthful Knight", "MM3", "29");
addCardInfoToList(boosterList, "Zealous Strike", "CN2", "101");
}
/**
 * Appends the card pool for booster slot 3, blue group A, to {@code boosterList}.
 * Each call registers one card as (card name, set code, collector number);
 * the set code / collector number pair pins the exact printing used for this slot.
 * Entries are kept in alphabetical order by card name.
 *
 * @param boosterList the list that receives the card entries for this slot
 */
private void addSlot3blueA(List<CardInfo> boosterList) {
addCardInfoToList(boosterList, "Amass the Components", "IMA", "41");
addCardInfoToList(boosterList, "Anticipate", "M19", "44");
addCardInfoToList(boosterList, "Artificer's Assistant", "DOM", "44");
addCardInfoToList(boosterList, "Augury Owl", "PCA", "14");
addCardInfoToList(boosterList, "Befuddle", "M19", "309");
addCardInfoToList(boosterList, "Benthic Giant", "BBD", "113");
addCardInfoToList(boosterList, "Calculated Dismissal", "ORI", "48");
addCardInfoToList(boosterList, "Call to Heel", "BBD", "114");
addCardInfoToList(boosterList, "Caller of Gales", "CN2", "103");
addCardInfoToList(boosterList, "Cancel", "KTK", "33");
addCardInfoToList(boosterList, "Capture Sphere", "GRN", "31");
addCardInfoToList(boosterList, "Catalog", "SOI", "51");
addCardInfoToList(boosterList, "Chart a Course", "XLN", "48");
addCardInfoToList(boosterList, "Chillbringer", "RNA", "33");
addCardInfoToList(boosterList, "Chronostutter", "M15", "48");
addCardInfoToList(boosterList, "Circular Logic", "UMA", "47");
addCardInfoToList(boosterList, "Clear the Mind", "RNA", "34");
addCardInfoToList(boosterList, "Cloak of Mists", "GS1", "13");
addCardInfoToList(boosterList, "Cloudkin Seer", "M20", "54");
addCardInfoToList(boosterList, "Clutch of Currents", "BFZ", "72");
addCardInfoToList(boosterList, "Compelling Argument", "AKH", "47");
addCardInfoToList(boosterList, "Condescend", "IMA", "46");
addCardInfoToList(boosterList, "Containment Membrane", "OGW", "52");
addCardInfoToList(boosterList, "Contingency Plan", "EMN", "52");
addCardInfoToList(boosterList, "Contradict", "DTK", "49");
addCardInfoToList(boosterList, "Crashing Tide", "RIX", "34");
addCardInfoToList(boosterList, "Crush Dissent", "WAR", "47");
addCardInfoToList(boosterList, "Curio Vendor", "KLD", "42");
addCardInfoToList(boosterList, "Daze", "DD2", "23");
addCardInfoToList(boosterList, "Decision Paralysis", "AKH", "50");
addCardInfoToList(boosterList, "Deep Freeze", "DOM", "50");
addCardInfoToList(boosterList, "Dispel", "BFZ", "76");
addCardInfoToList(boosterList, "Displace", "EMN", "55");
addCardInfoToList(boosterList, "Drag Under", "W17", "9");
addCardInfoToList(boosterList, "Dragon's Eye Savants", "KTK", "38");
addCardInfoToList(boosterList, "Dreadwaters", "ORI", "56");
addCardInfoToList(boosterList, "Embodiment of Spring", "KTK", "39");
addCardInfoToList(boosterList, "Ensoul Artifact", "M15", "54");
addCardInfoToList(boosterList, "Everdream", "MH1", "47");
addCardInfoToList(boosterList, "Failed Inspection", "KLD", "47");
addCardInfoToList(boosterList, "Flashfreeze", "MM2", "45");
addCardInfoToList(boosterList, "Fledgling Mawcor", "DD2", "10");
addCardInfoToList(boosterList, "Fleeting Distraction", "CN2", "110");
addCardInfoToList(boosterList, "Fogwalker", "EMN", "60");
addCardInfoToList(boosterList, "Foil", "UMA", "55");
addCardInfoToList(boosterList, "Frantic Search", "UMA", "57");
addCardInfoToList(boosterList, "Frilled Sea Serpent", "M19", "56");
addCardInfoToList(boosterList, "Gaseous Form", "EMA", "51");
addCardInfoToList(boosterList, "Glint", "DTK", "55");
addCardInfoToList(boosterList, "Gone Missing", "SOI", "67");
addCardInfoToList(boosterList, "Grasp of Phantoms", "MM3", "41");
addCardInfoToList(boosterList, "Guard Gomazoa", "PCA", "17");
addCardInfoToList(boosterList, "Gurmag Drowner", "DTK", "57");
addCardInfoToList(boosterList, "Gush", "DD2", "27");
addCardInfoToList(boosterList, "Hightide Hermit", "KLD", "51");
addCardInfoToList(boosterList, "Hinterland Drake", "AER", "34");
addCardInfoToList(boosterList, "Humongulus", "RNA", "41");
addCardInfoToList(boosterList, "Inkfathom Divers", "DDT", "8");
addCardInfoToList(boosterList, "Invisibility", "M15", "61");
addCardInfoToList(boosterList, "Jeering Homunculus", "CN2", "33");
addCardInfoToList(boosterList, "Jeskai Sage", "FRF", "38");
addCardInfoToList(boosterList, "Kiora's Dambreaker", "WAR", "58");
addCardInfoToList(boosterList, "Laboratory Brute", "EMN", "67");
addCardInfoToList(boosterList, "Laboratory Maniac", "UMA", "61");
addCardInfoToList(boosterList, "Labyrinth Guardian", "AKH", "60");
addCardInfoToList(boosterList, "Messenger Jays", "CN2", "35");
addCardInfoToList(boosterList, "Mind Sculpt", "M15", "70");
addCardInfoToList(boosterList, "Mist Raven", "DDQ", "26");
addCardInfoToList(boosterList, "Mnemonic Wall", "IMA", "67");
addCardInfoToList(boosterList, "Monastery Loremaster", "DTK", "63");
addCardInfoToList(boosterList, "Murder of Crows", "A25", "66");
addCardInfoToList(boosterList, "Nagging Thoughts", "SOI", "74");
addCardInfoToList(boosterList, "Niblis of Dusk", "SOI", "76");
addCardInfoToList(boosterList, "Nine-Tail White Fox", "GS1", "8");
addCardInfoToList(boosterList, "Ojutai's Breath", "DTK", "67");
addCardInfoToList(boosterList, "Phyrexian Ingester", "EMA", "66");
addCardInfoToList(boosterList, "Pondering Mage", "MH1", "63");
addCardInfoToList(boosterList, "Predict", "C18", "98");
addCardInfoToList(boosterList, "Purple-Crystal Crab", "GS1", "3");
addCardInfoToList(boosterList, "Refocus", "FRF", "47");
addCardInfoToList(boosterList, "Riftwing Cloudskate", "DD2", "15");
addCardInfoToList(boosterList, "River Darter", "RIX", "47");
addCardInfoToList(boosterList, "Sailor of Means", "RIX", "49");
addCardInfoToList(boosterList, "Scroll Thief", "DDT", "17");
addCardInfoToList(boosterList, "Send to Sleep", "ORI", "71");
addCardInfoToList(boosterList, "Shipwreck Looter", "XLN", "76");
addCardInfoToList(boosterList, "Silent Observer", "SOI", "86");
addCardInfoToList(boosterList, "Silvergill Adept", "RIX", "53");
addCardInfoToList(boosterList, "Singing Bell Strike", "KTK", "55");
addCardInfoToList(boosterList, "Skaab Goliath", "ORI", "74");
addCardInfoToList(boosterList, "Skitter Eel", "RNA", "53");
addCardInfoToList(boosterList, "Sleep", "M19", "74");
addCardInfoToList(boosterList, "Slipstream Eel", "CM2", "49");
addCardInfoToList(boosterList, "Slither Blade", "AKH", "71");
addCardInfoToList(boosterList, "Sphinx's Tutelage", "ORI", "76");
addCardInfoToList(boosterList, "Stream of Thought", "MH1", "71");
addCardInfoToList(boosterList, "Surrakar Banisher", "DDO", "43");
addCardInfoToList(boosterList, "Syr Elenora, the Discerning", "ELD", "67");
addCardInfoToList(boosterList, "Thought Collapse", "RNA", "57");
addCardInfoToList(boosterList, "Thunder Drake", "WAR", "73");
addCardInfoToList(boosterList, "Tidal Warrior", "DDT", "20");
addCardInfoToList(boosterList, "Trail of Evidence", "SOI", "93");
addCardInfoToList(boosterList, "Treasure Cruise", "KTK", "59");
addCardInfoToList(boosterList, "Treasure Mage", "DDU", "40");
addCardInfoToList(boosterList, "Trinket Mage", "DDU", "41");
addCardInfoToList(boosterList, "Turn Aside", "EMN", "78");
addCardInfoToList(boosterList, "Uncomfortable Chill", "M19", "82");
addCardInfoToList(boosterList, "Wall of Frost", "MM3", "56");
addCardInfoToList(boosterList, "Warden of Evos Isle", "EMA", "76");
addCardInfoToList(boosterList, "Watercourser", "BBD", "137");
addCardInfoToList(boosterList, "Weldfast Wingsmith", "KLD", "69");
addCardInfoToList(boosterList, "Welkin Tern", "GS1", "5");
addCardInfoToList(boosterList, "Wind Drake", "KLD", "70");
addCardInfoToList(boosterList, "Wind Strider", "XLN", "88");
addCardInfoToList(boosterList, "Wind-Kin Raiders", "AER", "50");
addCardInfoToList(boosterList, "Windcaller Aven", "MH1", "77");
addCardInfoToList(boosterList, "Wishcoin Crab", "GRN", "60");
addCardInfoToList(boosterList, "Wishful Merfolk", "ELD", "73");
addCardInfoToList(boosterList, "Wretched Gryff", "EMN", "12");
addCardInfoToList(boosterList, "Write into Being", "FRF", "59");
addCardInfoToList(boosterList, "Youthful Scholar", "DTK", "84");
}
/**
 * Appends the card pool for booster slot 4, blue group B, to {@code boosterList}.
 * Each call registers one card as (card name, set code, collector number);
 * the set code / collector number pair pins the exact printing used for this slot.
 * Entries are kept in alphabetical order by card name.
 *
 * @param boosterList the list that receives the card entries for this slot
 */
private void addSlot4blueB(List<CardInfo> boosterList) {
addCardInfoToList(boosterList, "Academy Journeymage", "DOM", "41");
addCardInfoToList(boosterList, "Aether Tradewinds", "KLD", "38");
addCardInfoToList(boosterList, "Aethersnipe", "MM2", "39");
addCardInfoToList(boosterList, "Amphin Pathmage", "M15", "45");
addCardInfoToList(boosterList, "Arcane Denial", "CMA", "30");
addCardInfoToList(boosterList, "Archaeomancer", "C17", "81");
addCardInfoToList(boosterList, "Archetype of Imagination", "C18", "81");
addCardInfoToList(boosterList, "Augur of Bolas", "MM3", "30");
addCardInfoToList(boosterList, "Bastion Inventor", "AER", "30");
addCardInfoToList(boosterList, "Bewilder", "IMA", "43");
addCardInfoToList(boosterList, "Blue Elemental Blast", "A25", "43");
addCardInfoToList(boosterList, "Borrowing 100,000 Arrows", "A25", "45");
addCardInfoToList(boosterList, "Brainstorm", "A25", "46");
addCardInfoToList(boosterList, "Brilliant Spectrum", "BFZ", "70");
addCardInfoToList(boosterList, "Brine Elemental", "A25", "47");
addCardInfoToList(boosterList, "Cartouche of Knowledge", "AKH", "45");
addCardInfoToList(boosterList, "Castaway's Despair", "XLN", "281");
addCardInfoToList(boosterList, "Choking Tethers", "A25", "48");
addCardInfoToList(boosterList, "Citywatch Sphinx", "GRN", "33");
addCardInfoToList(boosterList, "Claustrophobia", "DDT", "3");
addCardInfoToList(boosterList, "Cloud Elemental", "MM2", "42");
addCardInfoToList(boosterList, "Cloudreader Sphinx", "DOM", "47");
addCardInfoToList(boosterList, "Concentrate", "E02", "9");
addCardInfoToList(boosterList, "Convolute", "EMN", "53");
addCardInfoToList(boosterList, "Coral Trickster", "DDN", "44");
addCardInfoToList(boosterList, "Coralhelm Guide", "BBD", "116");
addCardInfoToList(boosterList, "Counterspell", "A25", "50");
addCardInfoToList(boosterList, "Court Hussar", "A25", "51");
addCardInfoToList(boosterList, "Curiosity", "A25", "52");
addCardInfoToList(boosterList, "Dazzling Lights", "GRN", "34");
addCardInfoToList(boosterList, "Deep Analysis", "EMA", "45");
addCardInfoToList(boosterList, "Diminish", "IMA", "50");
addCardInfoToList(boosterList, "Dirgur Nemesis", "DTK", "51");
addCardInfoToList(boosterList, "Distortion Strike", "IMA", "52");
addCardInfoToList(boosterList, "Divination", "M19", "51");
addCardInfoToList(boosterList, "Doorkeeper", "IMA", "53");
addCardInfoToList(boosterList, "Dream Cache", "C18", "88");
addCardInfoToList(boosterList, "Dream Twist", "EMA", "47");
addCardInfoToList(boosterList, "Eel Umbra", "C18", "89");
addCardInfoToList(boosterList, "Enlightened Maniac", "EMN", "58");
addCardInfoToList(boosterList, "Errant Ephemeron", "DD2", "20");
addCardInfoToList(boosterList, "Essence Scatter", "M19", "54");
addCardInfoToList(boosterList, "Exclude", "MH1", "48");
addCardInfoToList(boosterList, "Fact or Fiction", "CM2", "42");
addCardInfoToList(boosterList, "Faerie Invaders", "DDN", "57");
addCardInfoToList(boosterList, "Faerie Mechanist", "DDU", "38");
addCardInfoToList(boosterList, "Fascination", "FRF", "34");
addCardInfoToList(boosterList, "Fathom Seer", "C14", "109");
addCardInfoToList(boosterList, "Fog Bank", "BBD", "117");
addCardInfoToList(boosterList, "Forbidden Alchemy", "MM3", "38");
addCardInfoToList(boosterList, "Frost Lynx", "BBD", "118");
addCardInfoToList(boosterList, "Ghost Ship", "A25", "60");
addCardInfoToList(boosterList, "Glacial Crasher", "M15", "57");
addCardInfoToList(boosterList, "Hieroglyphic Illumination", "AKH", "57");
addCardInfoToList(boosterList, "Horseshoe Crab", "A25", "61");
addCardInfoToList(boosterList, "Impulse", "BBD", "119");
addCardInfoToList(boosterList, "Ior Ruin Expedition", "E01", "25");
addCardInfoToList(boosterList, "Jace's Phantasm", "IMA", "60");
addCardInfoToList(boosterList, "Jwar Isle Avenger", "OGW", "58");
addCardInfoToList(boosterList, "Lay Claim", "AKH", "61");
addCardInfoToList(boosterList, "Leapfrog", "GRN", "42");
addCardInfoToList(boosterList, "Mahamoti Djinn", "IMA", "64");
addCardInfoToList(boosterList, "Man-o'-War", "A25", "64");
addCardInfoToList(boosterList, "Mana Leak", "DDN", "64");
addCardInfoToList(boosterList, "Maximize Altitude", "GRN", "43");
addCardInfoToList(boosterList, "Memory Lapse", "EMA", "60");
addCardInfoToList(boosterList, "Merfolk Looter", "DDT", "10");
addCardInfoToList(boosterList, "Metallic Rebuke", "AER", "39");
addCardInfoToList(boosterList, "Mulldrifter", "CM2", "47");
addCardInfoToList(boosterList, "Mystic of the Hidden Way", "A25", "67");
addCardInfoToList(boosterList, "Mystical Teachings", "MM3", "44");
addCardInfoToList(boosterList, "Negate", "RIX", "44");
addCardInfoToList(boosterList, "Ninja of the Deep Hours", "C18", "95");
addCardInfoToList(boosterList, "Ojutai Interceptor", "DTK", "66");
addCardInfoToList(boosterList, "Omenspeaker", "BBD", "125");
addCardInfoToList(boosterList, "Opportunity", "C17", "89");
addCardInfoToList(boosterList, "Opt", "DOM", "60");
addCardInfoToList(boosterList, "Peel from Reality", "DDO", "40");
addCardInfoToList(boosterList, "Phantasmal Bear", "A25", "69");
addCardInfoToList(boosterList, "Portent", "C18", "97");
addCardInfoToList(boosterList, "Preordain", "C15", "101");
addCardInfoToList(boosterList, "Prodigal Sorcerer", "EMA", "67");
addCardInfoToList(boosterList, "Propaganda", "C16", "94");
addCardInfoToList(boosterList, "Prosperous Pirates", "XLN", "69");
addCardInfoToList(boosterList, "Repulse", "CN2", "119");
addCardInfoToList(boosterList, "Retraction Helix", "A25", "71");
addCardInfoToList(boosterList, "Ringwarden Owl", "ORI", "68");
addCardInfoToList(boosterList, "River Serpent", "AKH", "66");
addCardInfoToList(boosterList, "Riverwheel Aerialists", "IMA", "71");
addCardInfoToList(boosterList, "Sage of Lat-Nam", "DOM", "64");
addCardInfoToList(boosterList, "Sea Gate Oracle", "C17", "92");
addCardInfoToList(boosterList, "Sealock Monster", "DDO", "42");
addCardInfoToList(boosterList, "Secrets of the Golden City", "RIX", "52");
addCardInfoToList(boosterList, "Shaper Parasite", "C14", "125");
addCardInfoToList(boosterList, "Shimmerscale Drake", "AKH", "70");
addCardInfoToList(boosterList, "Sigiled Starfish", "ORI", "73");
addCardInfoToList(boosterList, "Skittering Crustacean", "CN2", "36");
addCardInfoToList(boosterList, "Snap", "DDS", "10");
addCardInfoToList(boosterList, "Snapping Drake", "M19", "75");
addCardInfoToList(boosterList, "Somber Hoverguard", "MM2", "57");
addCardInfoToList(boosterList, "Spire Monitor", "MM3", "52");
addCardInfoToList(boosterList, "Steady Progress", "MM2", "58");
addCardInfoToList(boosterList, "Stitched Drake", "DDQ", "49");
addCardInfoToList(boosterList, "Storm Sculptor", "XLN", "85");
addCardInfoToList(boosterList, "Strategic Planning", "HOU", "47");
addCardInfoToList(boosterList, "Syncopate", "DOM", "67");
addCardInfoToList(boosterList, "Tandem Lookout", "MM3", "53");
addCardInfoToList(boosterList, "Temporal Fissure", "DDS", "12");
addCardInfoToList(boosterList, "Thornwind Faeries", "CMA", "42");
addCardInfoToList(boosterList, "Thought Scour", "IMA", "76");
addCardInfoToList(boosterList, "Thoughtcast", "MM2", "64");
addCardInfoToList(boosterList, "Thrummingbird", "CM2", "52");
addCardInfoToList(boosterList, "Tidal Wave", "EMA", "75");
addCardInfoToList(boosterList, "Totally Lost", "BBD", "135");
addCardInfoToList(boosterList, "Treasure Hunt", "C18", "109");
addCardInfoToList(boosterList, "Triton Tactics", "DDT", "23");
addCardInfoToList(boosterList, "Vapor Snag", "MM2", "66");
addCardInfoToList(boosterList, "Vigean Graftmage", "MM2", "68");
addCardInfoToList(boosterList, "Wave-Wing Elemental", "BFZ", "88");
addCardInfoToList(boosterList, "Whiplash Trap", "DDN", "70");
addCardInfoToList(boosterList, "Windrider Eel", "E01", "30");
}
/**
 * Appends the card pool for booster slot 5, black group A, to {@code boosterList}.
 * Each call registers one card as (card name, set code, collector number);
 * the set code / collector number pair pins the exact printing used for this slot.
 * Entries are kept in alphabetical order by card name ("The Eldest Reborn" sorts
 * under "The", which correctly follows "Thallid").
 *
 * @param boosterList the list that receives the card entries for this slot
 */
private void addSlot5blackA(List<CardInfo> boosterList) {
addCardInfoToList(boosterList, "Aid the Fallen", "WAR", "76");
addCardInfoToList(boosterList, "Alesha's Vanguard", "FRF", "60");
addCardInfoToList(boosterList, "Alley Strangler", "AER", "52");
addCardInfoToList(boosterList, "Ambitious Aetherborn", "KLD", "72");
addCardInfoToList(boosterList, "Ancestral Vengeance", "FRF", "61");
addCardInfoToList(boosterList, "Annihilate", "EMA", "79");
addCardInfoToList(boosterList, "Bala Ged Scorpion", "IMA", "79");
addCardInfoToList(boosterList, "Bitter Revelation", "KTK", "65");
addCardInfoToList(boosterList, "Bladebrand", "M20", "86");
addCardInfoToList(boosterList, "Blighted Bat", "AKH", "80");
addCardInfoToList(boosterList, "Blistergrub", "DDR", "40");
addCardInfoToList(boosterList, "Bone Splinters", "M20", "92");
addCardInfoToList(boosterList, "Boon of Emrakul", "EMN", "81");
addCardInfoToList(boosterList, "Breeding Pit", "DDC", "53");
addCardInfoToList(boosterList, "Butcher's Glee", "IMA", "84");
addCardInfoToList(boosterList, "Cabal Therapy", "EMA", "83");
addCardInfoToList(boosterList, "Cackling Imp", "DDC", "41");
addCardInfoToList(boosterList, "Cadaver Imp", "DDR", "41");
addCardInfoToList(boosterList, "Catacomb Slug", "ORI", "86");
addCardInfoToList(boosterList, "Certain Death", "EMN", "84");
addCardInfoToList(boosterList, "Coat with Venom", "DTK", "91");
addCardInfoToList(boosterList, "Corpsehatch", "DDP", "50");
addCardInfoToList(boosterList, "Covenant of Blood", "M15", "91");
addCardInfoToList(boosterList, "Crow of Dark Tidings", "SOI", "105");
addCardInfoToList(boosterList, "Dark Dabbling", "ORI", "89");
addCardInfoToList(boosterList, "Dark Withering", "C19", "110");
addCardInfoToList(boosterList, "Darkblast", "GK1", "51");
addCardInfoToList(boosterList, "Dead Reveler", "IMA", "86");
addCardInfoToList(boosterList, "Deadeye Tormentor", "XLN", "98");
addCardInfoToList(boosterList, "Defeat", "DTK", "97");
addCardInfoToList(boosterList, "Demon's Grasp", "BFZ", "108");
addCardInfoToList(boosterList, "Demonic Tutor", "DDC", "49");
addCardInfoToList(boosterList, "Demonic Vigor", "DOM", "85");
addCardInfoToList(boosterList, "Dismember", "MM2", "79");
addCardInfoToList(boosterList, "Disowned Ancestor", "KTK", "70");
addCardInfoToList(boosterList, "Doomed Dissenter", "AKH", "87");
addCardInfoToList(boosterList, "Douse in Gloom", "FRF", "68");
addCardInfoToList(boosterList, "Dread Return", "DDQ", "55");
addCardInfoToList(boosterList, "Dregscape Zombie", "DDN", "5");
addCardInfoToList(boosterList, "Dukhara Scavenger", "KLD", "77");
addCardInfoToList(boosterList, "Dune Beetle", "AKH", "89");
addCardInfoToList(boosterList, "Duress", "EMA", "86");
addCardInfoToList(boosterList, "Farbog Revenant", "SOI", "110");
addCardInfoToList(boosterList, "Fetid Imp", "ORI", "97");
addCardInfoToList(boosterList, "First-Sphere Gargantua", "MH1", "91");
addCardInfoToList(boosterList, "Flesh to Dust", "ORI", "280");
addCardInfoToList(boosterList, "Fretwork Colony", "KLD", "83");
addCardInfoToList(boosterList, "Genju of the Fens", "DDD", "47");
addCardInfoToList(boosterList, "Ghoulcaller's Accomplice", "SOI", "112");
addCardInfoToList(boosterList, "Grasping Scoundrel", "RIX", "74");
addCardInfoToList(boosterList, "Gravepurge", "DTK", "104");
addCardInfoToList(boosterList, "Grim Discovery", "DDR", "51");
addCardInfoToList(boosterList, "Hideous End", "DDR", "52");
addCardInfoToList(boosterList, "Induce Despair", "DDP", "53");
addCardInfoToList(boosterList, "Infernal Scarring", "ORI", "102");
addCardInfoToList(boosterList, "Infest", "CN2", "139");
addCardInfoToList(boosterList, "Instill Infection", "MM2", "85");
addCardInfoToList(boosterList, "Kalastria Nightwatch", "BFZ", "115");
addCardInfoToList(boosterList, "Krumar Bond-Kin", "KTK", "77");
addCardInfoToList(boosterList, "Lazotep Behemoth", "WAR", "95");
addCardInfoToList(boosterList, "Macabre Waltz", "SOI", "121");
addCardInfoToList(boosterList, "Marauding Boneslasher", "HOU", "70");
addCardInfoToList(boosterList, "Mark of the Vampire", "UMA", "105");
addCardInfoToList(boosterList, "Marsh Hulk", "DTK", "109");
addCardInfoToList(boosterList, "Merciless Resolve", "SOI", "123");
addCardInfoToList(boosterList, "Miasmic Mummy", "AKH", "100");
addCardInfoToList(boosterList, "Mind Rake", "MH1", "96");
addCardInfoToList(boosterList, "Mire's Malice", "BFZ", "117");
addCardInfoToList(boosterList, "Murder", "M20", "109");
addCardInfoToList(boosterList, "Murderous Compulsion", "SOI", "126");
addCardInfoToList(boosterList, "Nantuko Husk", "ORI", "109");
addCardInfoToList(boosterList, "Never Happened", "GRN", "80");
addCardInfoToList(boosterList, "Nirkana Assassin", "BFZ", "118");
addCardInfoToList(boosterList, "Plaguecrafter", "GRN", "82");
addCardInfoToList(boosterList, "Prowling Pangolin", "EMA", "104");
addCardInfoToList(boosterList, "Rakshasa's Secret", "KTK", "84");
addCardInfoToList(boosterList, "Read the Bones", "DDP", "56");
addCardInfoToList(boosterList, "Reaper of Night", "ELD", "102");
addCardInfoToList(boosterList, "Reassembling Skeleton", "M19", "116");
addCardInfoToList(boosterList, "Reckless Imp", "DTK", "115");
addCardInfoToList(boosterList, "Reckless Spite", "E01", "37");
addCardInfoToList(boosterList, "Returned Centaur", "ORI", "116");
addCardInfoToList(boosterList, "Revenant", "ORI", "117");
addCardInfoToList(boosterList, "Rite of the Serpent", "KTK", "86");
addCardInfoToList(boosterList, "Ruin Rat", "HOU", "75");
addCardInfoToList(boosterList, "Scrounger of Souls", "HOU", "76");
addCardInfoToList(boosterList, "Sengir Vampire", "W17", "19");
addCardInfoToList(boosterList, "Shambling Attendants", "KTK", "89");
addCardInfoToList(boosterList, "Shambling Goblin", "DTK", "118");
addCardInfoToList(boosterList, "Shriekmaw", "CMA", "68");
addCardInfoToList(boosterList, "Silumgar Butcher", "DTK", "122");
addCardInfoToList(boosterList, "Skeleton Archer", "M19", "118");
addCardInfoToList(boosterList, "Stab Wound", "M15", "116");
addCardInfoToList(boosterList, "Stallion of Ashmouth", "SOI", "136");
addCardInfoToList(boosterList, "Stinkweed Imp", "GK1", "53");
addCardInfoToList(boosterList, "Stromkirk Patrol", "CN2", "149");
addCardInfoToList(boosterList, "Subtle Strike", "KLD", "100");
addCardInfoToList(boosterList, "Sultai Runemark", "FRF", "86");
addCardInfoToList(boosterList, "Tar Snare", "OGW", "90");
addCardInfoToList(boosterList, "Thallid Omnivore", "DOM", "106");
addCardInfoToList(boosterList, "The Eldest Reborn", "DOM", "90");
addCardInfoToList(boosterList, "Thornbow Archer", "ORI", "121");
addCardInfoToList(boosterList, "Thraben Foulbloods", "EMN", "108");
addCardInfoToList(boosterList, "Torment of Venom", "HOU", "79");
addCardInfoToList(boosterList, "Touch of Moonglove", "ORI", "123");
addCardInfoToList(boosterList, "Twins of Maurer Estate", "SOI", "142");
addCardInfoToList(boosterList, "Undercity's Embrace", "RNA", "89");
addCardInfoToList(boosterList, "Untamed Hunger", "OGW", "91");
addCardInfoToList(boosterList, "Unyielding Krumar", "KTK", "94");
addCardInfoToList(boosterList, "Vampire Champion", "RIX", "198");
addCardInfoToList(boosterList, "Vampire Envoy", "OGW", "92");
addCardInfoToList(boosterList, "Vampire Nighthawk", "E02", "23");
addCardInfoToList(boosterList, "Vessel of Malignity", "SOI", "144");
addCardInfoToList(boosterList, "Voracious Null", "BFZ", "125");
addCardInfoToList(boosterList, "Vraska's Finisher", "WAR", "112");
addCardInfoToList(boosterList, "Walk the Plank", "XLN", "130");
addCardInfoToList(boosterList, "Warteye Witch", "MH1", "115");
addCardInfoToList(boosterList, "Weight of the Underworld", "ORI", "126");
addCardInfoToList(boosterList, "Weirded Vampire", "EMN", "113");
addCardInfoToList(boosterList, "Yargle, Glutton of Urborg", "DOM", "113");
addCardInfoToList(boosterList, "Zulaport Chainmage", "OGW", "93");
}
    /**
     * Populates the card pool for booster slot 6 ("black B") in this set's
     * booster structure.
     *
     * <p>Each call registers one card by name, source-set code, and collector
     * number; the literals are the authoritative data here — the resolution to
     * an actual card happens inside {@code addCardInfoToList}.
     * NOTE(review): presumably this mirrors the printed slot sheet for this
     * product — verify against the official slot list before editing entries.
     *
     * @param boosterList the list that accumulates the slot's candidate cards
     */
    private void addSlot6blackB(List<CardInfo> boosterList) {
        addCardInfoToList(boosterList, "Absorb Vis", "CN2", "126");
        addCardInfoToList(boosterList, "Accursed Spirit", "M15", "85");
        addCardInfoToList(boosterList, "Altar's Reap", "DDR", "37");
        addCardInfoToList(boosterList, "Animate Dead", "EMA", "78");
        addCardInfoToList(boosterList, "Baleful Ammit", "AKH", "79");
        addCardInfoToList(boosterList, "Balustrade Spy", "IMA", "80");
        addCardInfoToList(boosterList, "Bartizan Bats", "GRN", "62");
        addCardInfoToList(boosterList, "Black Cat", "M15", "86");
        addCardInfoToList(boosterList, "Blessing of Belzenlok", "DOM", "77");
        addCardInfoToList(boosterList, "Blightsoil Druid", "EMA", "80");
        addCardInfoToList(boosterList, "Blood Artist", "C17", "99");
        addCardInfoToList(boosterList, "Bloodrite Invoker", "DDP", "45");
        addCardInfoToList(boosterList, "Caligo Skin-Witch", "DOM", "80");
        addCardInfoToList(boosterList, "Carrion Feeder", "MH1", "81");
        addCardInfoToList(boosterList, "Carrion Imp", "RNA", "66");
        addCardInfoToList(boosterList, "Catacomb Crocodile", "RNA", "67");
        addCardInfoToList(boosterList, "Caustic Tar", "A25", "81");
        addCardInfoToList(boosterList, "Child of Night", "IMA", "85");
        addCardInfoToList(boosterList, "Costly Plunder", "XLN", "96");
        addCardInfoToList(boosterList, "Cower in Fear", "MM3", "62");
        addCardInfoToList(boosterList, "Crippling Blight", "M15", "92");
        addCardInfoToList(boosterList, "Cursed Minotaur", "AKH", "85");
        addCardInfoToList(boosterList, "Daring Demolition", "AER", "55");
        addCardInfoToList(boosterList, "Dark Ritual", "A25", "82");
        addCardInfoToList(boosterList, "Deadbridge Shaman", "EMA", "85");
        addCardInfoToList(boosterList, "Death Denied", "MM2", "76");
        addCardInfoToList(boosterList, "Desperate Castaways", "XLN", "101");
        addCardInfoToList(boosterList, "Diabolic Edict", "A25", "85");
        addCardInfoToList(boosterList, "Die Young", "KLD", "76");
        addCardInfoToList(boosterList, "Dinosaur Hunter", "RIX", "67");
        addCardInfoToList(boosterList, "Dirge of Dread", "A25", "86");
        addCardInfoToList(boosterList, "Dread Drone", "MM2", "80");
        addCardInfoToList(boosterList, "Dreadbringer Lampads", "C15", "122");
        addCardInfoToList(boosterList, "Driver of the Dead", "CN2", "133");
        addCardInfoToList(boosterList, "Drudge Sentinel", "DOM", "89");
        addCardInfoToList(boosterList, "Dusk Charger", "RIX", "69");
        addCardInfoToList(boosterList, "Dusk Legion Zealot", "RIX", "70");
        addCardInfoToList(boosterList, "Epicure of Blood", "M19", "95");
        addCardInfoToList(boosterList, "Erg Raiders", "A25", "90");
        addCardInfoToList(boosterList, "Eternal Thirst", "IMA", "89");
        addCardInfoToList(boosterList, "Evincar's Justice", "CMA", "58");
        addCardInfoToList(boosterList, "Executioner's Capsule", "C16", "109");
        addCardInfoToList(boosterList, "Eyeblight's Ending", "EMA", "88");
        addCardInfoToList(boosterList, "Fallen Angel", "A25", "91");
        addCardInfoToList(boosterList, "Fatal Push", "AER", "57");
        addCardInfoToList(boosterList, "Fen Hauler", "AER", "58");
        addCardInfoToList(boosterList, "Feral Abomination", "DOM", "92");
        addCardInfoToList(boosterList, "Festercreep", "CM2", "63");
        addCardInfoToList(boosterList, "Festering Newt", "IMA", "90");
        addCardInfoToList(boosterList, "Fill with Fright", "BBD", "144");
        addCardInfoToList(boosterList, "Fungal Infection", "DOM", "94");
        addCardInfoToList(boosterList, "Ghostly Changeling", "MM2", "83");
        addCardInfoToList(boosterList, "Gifted Aetherborn", "AER", "61");
        addCardInfoToList(boosterList, "Go for the Throat", "C17", "114");
        addCardInfoToList(boosterList, "Gravedigger", "CM2", "66");
        addCardInfoToList(boosterList, "Gray Merchant of Asphodel", "C14", "146");
        addCardInfoToList(boosterList, "Grim Affliction", "MM2", "84");
        addCardInfoToList(boosterList, "Grixis Slavedriver", "MM3", "74");
        addCardInfoToList(boosterList, "Grotesque Mutation", "BBD", "145");
        addCardInfoToList(boosterList, "Gruesome Fate", "RIX", "75");
        addCardInfoToList(boosterList, "Gurmag Angler", "UMA", "102");
        addCardInfoToList(boosterList, "Hired Blade", "M19", "100");
        addCardInfoToList(boosterList, "Hound of the Farbogs", "SOI", "117");
        addCardInfoToList(boosterList, "Innocent Blood", "EMA", "94");
        addCardInfoToList(boosterList, "Inquisition of Kozilek", "MM3", "75");
        addCardInfoToList(boosterList, "Lawless Broker", "KLD", "86");
        addCardInfoToList(boosterList, "Lethal Sting", "HOU", "67");
        addCardInfoToList(boosterList, "Lord of the Accursed", "AKH", "99");
        addCardInfoToList(boosterList, "March of the Drowned", "XLN", "112");
        addCardInfoToList(boosterList, "Mephitic Vapors", "GRN", "76");
        addCardInfoToList(boosterList, "Mind Rot", "W16", "7");
        addCardInfoToList(boosterList, "Moment of Craving", "RIX", "79");
        addCardInfoToList(boosterList, "Nameless Inversion", "MM2", "87");
        addCardInfoToList(boosterList, "Night's Whisper", "EMA", "100");
        addCardInfoToList(boosterList, "Noxious Dragon", "FRF", "77");
        addCardInfoToList(boosterList, "Okiba-Gang Shinobi", "PCA", "35");
        addCardInfoToList(boosterList, "Painful Lesson", "AKH", "102");
        addCardInfoToList(boosterList, "Phyrexian Rager", "CMA", "62");
        addCardInfoToList(boosterList, "Phyrexian Reclamation", "C15", "133");
        addCardInfoToList(boosterList, "Pit Keeper", "MM3", "81");
        addCardInfoToList(boosterList, "Plague Wight", "RNA", "82");
        addCardInfoToList(boosterList, "Plagued Rusalka", "MM2", "89");
        addCardInfoToList(boosterList, "Prakhata Club Security", "KLD", "98");
        addCardInfoToList(boosterList, "Queen's Agent", "XLN", "114");
        addCardInfoToList(boosterList, "Quest for the Gravelord", "BBD", "156");
        addCardInfoToList(boosterList, "Rabid Bloodsucker", "ORI", "113");
        addCardInfoToList(boosterList, "Rakdos Drake", "IMA", "103");
        addCardInfoToList(boosterList, "Ravenous Chupacabra", "RIX", "82");
        addCardInfoToList(boosterList, "Recover", "MM3", "82");
        addCardInfoToList(boosterList, "Renegade Demon", "DDR", "59");
        addCardInfoToList(boosterList, "Renegade's Getaway", "AER", "69");
        addCardInfoToList(boosterList, "Rotfeaster Maggot", "M15", "112");
        addCardInfoToList(boosterList, "Scarab Feast", "AKH", "106");
        addCardInfoToList(boosterList, "Scuttling Death", "MM2", "94");
        addCardInfoToList(boosterList, "Seal of Doom", "C15", "135");
        addCardInfoToList(boosterList, "Shadowcloak Vampire", "M15", "113");
        addCardInfoToList(boosterList, "Skeletal Scrying", "C14", "162");
        addCardInfoToList(boosterList, "Skulking Ghost", "EMA", "107");
        addCardInfoToList(boosterList, "Smiting Helix", "MH1", "109");
        addCardInfoToList(boosterList, "Spreading Rot", "XLN", "125");
        addCardInfoToList(boosterList, "Street Wraith", "A25", "108");
        addCardInfoToList(boosterList, "Tavern Swindler", "BBD", "162");
        addCardInfoToList(boosterList, "Tendrils of Corruption", "C14", "166");
        addCardInfoToList(boosterList, "Thorn of the Black Rose", "CN2", "48");
        addCardInfoToList(boosterList, "Tidy Conclusion", "KLD", "103");
        addCardInfoToList(boosterList, "Tragic Slip", "C14", "167");
        addCardInfoToList(boosterList, "Trespasser's Curse", "AKH", "112");
        addCardInfoToList(boosterList, "Trial of Ambition", "AKH", "113");
        addCardInfoToList(boosterList, "Typhoid Rats", "M15", "118");
        addCardInfoToList(boosterList, "Unburden", "AKH", "114");
        addCardInfoToList(boosterList, "Urborg Uprising", "EMA", "111");
        addCardInfoToList(boosterList, "Vampire Hexmage", "C14", "168");
        addCardInfoToList(boosterList, "Vampire Lacerator", "A25", "114");
        addCardInfoToList(boosterList, "Virulent Swipe", "IMA", "113");
        addCardInfoToList(boosterList, "Wake of Vultures", "EMA", "115");
        addCardInfoToList(boosterList, "Walking Corpse", "M19", "126");
        addCardInfoToList(boosterList, "Wander in Death", "AKH", "115");
        addCardInfoToList(boosterList, "Wight of Precinct Six", "C16", "118");
        addCardInfoToList(boosterList, "Will-o'-the-Wisp", "A25", "115");
        addCardInfoToList(boosterList, "Windgrace Acolyte", "DOM", "112");
        addCardInfoToList(boosterList, "Wrench Mind", "IMA", "115");
    }
    /**
     * Populates the card pool for booster slot 7 ("red A").
     *
     * <p>Pure data method: each line contributes one (card name, set code,
     * collector number) entry via {@code addCardInfoToList}. Keep entries in
     * alphabetical order by card name, matching the sibling slot methods.
     * NOTE(review): entries should match the product's official slot sheet —
     * confirm before changing any set code or collector number.
     *
     * @param boosterList the list that accumulates the slot's candidate cards
     */
    private void addSlot7redA(List<CardInfo> boosterList) {
        addCardInfoToList(boosterList, "Act on Impulse", "M15", "126");
        addCardInfoToList(boosterList, "Ainok Tracker", "KTK", "96");
        addCardInfoToList(boosterList, "Alchemist's Greeting", "EMN", "116");
        addCardInfoToList(boosterList, "Ancient Grudge", "MM3", "88");
        addCardInfoToList(boosterList, "Arc Trail", "PCA", "39");
        addCardInfoToList(boosterList, "Arrow Storm", "KTK", "98");
        addCardInfoToList(boosterList, "Azra Bladeseeker", "BBD", "55");
        addCardInfoToList(boosterList, "Balduvian Horde", "A25", "120");
        addCardInfoToList(boosterList, "Barrage of Boulders", "KTK", "100");
        addCardInfoToList(boosterList, "Beetleback Chief", "PCA", "40");
        addCardInfoToList(boosterList, "Bellows Lizard", "ORI", "132");
        addCardInfoToList(boosterList, "Blastfire Bolt", "M15", "130");
        addCardInfoToList(boosterList, "Blazing Volley", "AKH", "119");
        addCardInfoToList(boosterList, "Blindblast", "WAR", "114");
        addCardInfoToList(boosterList, "Blood Ogre", "E01", "41");
        addCardInfoToList(boosterList, "Bloodfire Expert", "KTK", "101");
        addCardInfoToList(boosterList, "Bloodlust Inciter", "AKH", "120");
        addCardInfoToList(boosterList, "Bloodstone Goblin", "DOM", "115");
        addCardInfoToList(boosterList, "Blow Your House Down", "ELD", "114");
        addCardInfoToList(boosterList, "Bombard", "GNT", "37");
        addCardInfoToList(boosterList, "Bomber Corps", "GK1", "80");
        addCardInfoToList(boosterList, "Borrowed Hostility", "EMN", "121");
        addCardInfoToList(boosterList, "Brazen Buccaneers", "XLN", "134");
        addCardInfoToList(boosterList, "Brazen Wolves", "EMN", "122");
        addCardInfoToList(boosterList, "Bring Low", "KTK", "103");
        addCardInfoToList(boosterList, "Brute Strength", "DDT", "35");
        addCardInfoToList(boosterList, "Built to Smash", "KLD", "108");
        addCardInfoToList(boosterList, "Burst Lightning", "MM2", "109");
        addCardInfoToList(boosterList, "Canyon Lurkers", "KTK", "105");
        addCardInfoToList(boosterList, "Chandra's Pyrohelix", "WAR", "120");
        addCardInfoToList(boosterList, "Charging Monstrosaur", "XLN", "138");
        addCardInfoToList(boosterList, "Cobblebrute", "ORI", "138");
        addCardInfoToList(boosterList, "Crowd's Favor", "M15", "138");
        addCardInfoToList(boosterList, "Crown-Hunter Hireling", "CN2", "50");
        addCardInfoToList(boosterList, "Curse of Opulence", "C17", "24");
        addCardInfoToList(boosterList, "Destructive Tampering", "AER", "78");
        addCardInfoToList(boosterList, "Direct Current", "GRN", "96");
        addCardInfoToList(boosterList, "Dragon Fodder", "GNT", "39");
        addCardInfoToList(boosterList, "Dynacharge", "MM3", "94");
        addCardInfoToList(boosterList, "Erratic Explosion", "PCA", "41");
        addCardInfoToList(boosterList, "Expedite", "BBD", "177");
        addCardInfoToList(boosterList, "Falkenrath Reaver", "W17", "21");
        addCardInfoToList(boosterList, "Fireball", "IMA", "128");
        addCardInfoToList(boosterList, "Flame Jab", "EMA", "131");
        addCardInfoToList(boosterList, "Forge Devil", "M15", "140");
        addCardInfoToList(boosterList, "Foundry Street Denizen", "M15", "141");
        addCardInfoToList(boosterList, "Frontline Rebel", "AER", "82");
        addCardInfoToList(boosterList, "Furnace Whelp", "IMA", "129");
        addCardInfoToList(boosterList, "Galvanic Blast", "DDU", "45");
        addCardInfoToList(boosterList, "Generator Servant", "M15", "143");
        addCardInfoToList(boosterList, "Geomancer's Gambit", "MH1", "125");
        addCardInfoToList(boosterList, "Ghitu Lavarunner", "DOM", "127");
        addCardInfoToList(boosterList, "Giant Spectacle", "KLD", "116");
        addCardInfoToList(boosterList, "Goblin Assault", "MM3", "95");
        addCardInfoToList(boosterList, "Goblin Bombardment", "DDN", "24");
        addCardInfoToList(boosterList, "Goblin Fireslinger", "MM2", "114");
        addCardInfoToList(boosterList, "Goblin Matron", "MH1", "129");
        addCardInfoToList(boosterList, "Goblin Roughrider", "M15", "146");
        addCardInfoToList(boosterList, "Goblin War Paint", "BFZ", "146");
        addCardInfoToList(boosterList, "Gore Swine", "FRF", "103");
        addCardInfoToList(boosterList, "Gorehorn Minotaurs", "E01", "49");
        addCardInfoToList(boosterList, "Granitic Titan", "HOU", "95");
        addCardInfoToList(boosterList, "Grapeshot", "DDS", "16");
        addCardInfoToList(boosterList, "Gravitic Punch", "GRN", "105");
        addCardInfoToList(boosterList, "Guttersnipe", "IMA", "131");
        addCardInfoToList(boosterList, "Hammerhand", "IMA", "132");
        addCardInfoToList(boosterList, "Hardened Berserker", "DTK", "139");
        addCardInfoToList(boosterList, "Hyena Pack", "AKH", "139");
        addCardInfoToList(boosterList, "Ill-Tempered Cyclops", "CN2", "166");
        addCardInfoToList(boosterList, "Impact Tremors", "DTK", "140");
        addCardInfoToList(boosterList, "Incorrigible Youths", "SOI", "166");
        addCardInfoToList(boosterList, "Inferno Fist", "M15", "150");
        addCardInfoToList(boosterList, "Inferno Jet", "HOU", "99");
        addCardInfoToList(boosterList, "Ingot Chewer", "CM2", "110");
        addCardInfoToList(boosterList, "Keldon Halberdier", "IMA", "135");
        addCardInfoToList(boosterList, "Kiln Fiend", "IMA", "137");
        addCardInfoToList(boosterList, "Krenko's Enforcer", "M15", "152");
        addCardInfoToList(boosterList, "Leaping Master", "KTK", "114");
        addCardInfoToList(boosterList, "Leopard-Spotted Jiao", "GS1", "23");
        addCardInfoToList(boosterList, "Madcap Skills", "MM3", "99");
        addCardInfoToList(boosterList, "Mardu Warshrieker", "KTK", "117");
        addCardInfoToList(boosterList, "Maximize Velocity", "GRN", "111");
        addCardInfoToList(boosterList, "Miner's Bane", "M15", "157");
        addCardInfoToList(boosterList, "Mogg Flunkies", "MM3", "102");
        addCardInfoToList(boosterList, "Molten Rain", "MM3", "103");
        addCardInfoToList(boosterList, "Monastery Swiftspear", "IMA", "140");
        addCardInfoToList(boosterList, "Ondu Champion", "BFZ", "149");
        addCardInfoToList(boosterList, "Outnumber", "BFZ", "150");
        addCardInfoToList(boosterList, "Price of Progress", "EMA", "141");
        addCardInfoToList(boosterList, "Pyrotechnics", "FRF", "111");
        addCardInfoToList(boosterList, "Quakefoot Cyclops", "MH1", "142");
        addCardInfoToList(boosterList, "Reckless Fireweaver", "KLD", "126");
        addCardInfoToList(boosterList, "Reckless Wurm", "UMA", "144");
        addCardInfoToList(boosterList, "Rivals' Duel", "PCA", "51");
        addCardInfoToList(boosterList, "Ruinous Gremlin", "KLD", "128");
        addCardInfoToList(boosterList, "Samut's Sprint", "WAR", "142");
        addCardInfoToList(boosterList, "Sarkhan's Rage", "DTK", "153");
        addCardInfoToList(boosterList, "Screamreach Brawler", "DTK", "155");
        addCardInfoToList(boosterList, "Seismic Shift", "DOM", "141");
        addCardInfoToList(boosterList, "Shattering Spree", "GK1", "34");
        addCardInfoToList(boosterList, "Shenanigans", "MH1", "146");
        addCardInfoToList(boosterList, "Smelt", "M19", "158");
        addCardInfoToList(boosterList, "Sparkmage Apprentice", "DDN", "48");
        addCardInfoToList(boosterList, "Sparkspitter", "UMA", "149");
        addCardInfoToList(boosterList, "Staggershock", "IMA", "147");
        addCardInfoToList(boosterList, "Stormblood Berserker", "E01", "58");
        addCardInfoToList(boosterList, "Swift Kick", "KTK", "122");
        addCardInfoToList(boosterList, "Tectonic Rift", "M19", "162");
        addCardInfoToList(boosterList, "Temur Battle Rage", "FRF", "116");
        addCardInfoToList(boosterList, "Thrill of Possibility", "ELD", "146");
        addCardInfoToList(boosterList, "Tibalt's Rager", "WAR", "147");
        addCardInfoToList(boosterList, "Torch Courier", "GRN", "119");
        addCardInfoToList(boosterList, "Valakut Invoker", "BFZ", "159");
        addCardInfoToList(boosterList, "Valakut Predator", "BFZ", "160");
        addCardInfoToList(boosterList, "Valley Dasher", "KTK", "125");
        addCardInfoToList(boosterList, "Vandalize", "DTK", "165");
        addCardInfoToList(boosterList, "Volcanic Dragon", "M19", "167");
        addCardInfoToList(boosterList, "Volcanic Rush", "DTK", "166");
        addCardInfoToList(boosterList, "Wall of Fire", "M15", "167");
        addCardInfoToList(boosterList, "Wayward Giant", "KLD", "139");
        addCardInfoToList(boosterList, "Wojek Bodyguard", "GRN", "120");
    }
    /**
     * Populates the card pool for booster slot 8 ("red B").
     *
     * <p>Data-only method: one (card name, set code, collector number) entry
     * per line, delegated to {@code addCardInfoToList}. Entries are kept in
     * alphabetical order by card name, like the other slot methods.
     * NOTE(review): "Fiery Hellhound" uses collector number 284 (presumably an
     * alternate/intro printing in ORI) — confirm that is intentional.
     *
     * @param boosterList the list that accumulates the slot's candidate cards
     */
    private void addSlot8redB(List<CardInfo> boosterList) {
        addCardInfoToList(boosterList, "Act of Treason", "RNA", "91");
        addCardInfoToList(boosterList, "Ahn-Crop Crasher", "AKH", "117");
        addCardInfoToList(boosterList, "Akroan Sergeant", "ORI", "130");
        addCardInfoToList(boosterList, "Anger", "UMA", "122");
        addCardInfoToList(boosterList, "Atarka Efreet", "DTK", "128");
        addCardInfoToList(boosterList, "Avarax", "EMA", "117");
        addCardInfoToList(boosterList, "Barging Sergeant", "GRN", "92");
        addCardInfoToList(boosterList, "Battle Rampart", "BBD", "165");
        addCardInfoToList(boosterList, "Battle-Rattle Shaman", "E01", "40");
        addCardInfoToList(boosterList, "Blades of Velis Vel", "MM2", "105");
        addCardInfoToList(boosterList, "Bloodmad Vampire", "SOI", "146");
        addCardInfoToList(boosterList, "Blur of Blades", "HOU", "84");
        addCardInfoToList(boosterList, "Boggart Brute", "ORI", "133");
        addCardInfoToList(boosterList, "Boiling Earth", "BFZ", "142");
        addCardInfoToList(boosterList, "Boulder Salvo", "OGW", "102");
        addCardInfoToList(boosterList, "Browbeat", "A25", "123");
        addCardInfoToList(boosterList, "Cartouche of Zeal", "AKH", "124");
        addCardInfoToList(boosterList, "Cathartic Reunion", "KLD", "109");
        addCardInfoToList(boosterList, "Chandra's Revolution", "AER", "77");
        addCardInfoToList(boosterList, "Chartooth Cougar", "A25", "125");
        addCardInfoToList(boosterList, "Cinder Hellion", "OGW", "105");
        addCardInfoToList(boosterList, "Cleansing Screech", "GS1", "37");
        addCardInfoToList(boosterList, "Cosmotronic Wave", "GRN", "95");
        addCardInfoToList(boosterList, "Crash Through", "M19", "133");
        addCardInfoToList(boosterList, "Curse of the Nightly Hunt", "CM2", "90");
        addCardInfoToList(boosterList, "Death by Dragons", "CMA", "80");
        addCardInfoToList(boosterList, "Defiant Ogre", "FRF", "96");
        addCardInfoToList(boosterList, "Demolish", "XLN", "139");
        addCardInfoToList(boosterList, "Desert Cerodon", "AKH", "128");
        addCardInfoToList(boosterList, "Desperate Ravings", "C15", "149");
        addCardInfoToList(boosterList, "Distemper of the Blood", "EMN", "126");
        addCardInfoToList(boosterList, "Dragon Breath", "BBD", "172");
        addCardInfoToList(boosterList, "Dragon Egg", "IMA", "124");
        addCardInfoToList(boosterList, "Dragon Whelp", "CMA", "81");
        addCardInfoToList(boosterList, "Dragonsoul Knight", "MM2", "112");
        addCardInfoToList(boosterList, "Dual Shot", "SOI", "153");
        addCardInfoToList(boosterList, "Earth Elemental", "BBD", "174");
        addCardInfoToList(boosterList, "Emrakul's Hatcher", "DDP", "59");
        addCardInfoToList(boosterList, "Enthralling Victor", "BBD", "176");
        addCardInfoToList(boosterList, "Faithless Looting", "CM2", "96");
        addCardInfoToList(boosterList, "Fall of the Hammer", "CM2", "97");
        addCardInfoToList(boosterList, "Fervent Strike", "DOM", "117");
        addCardInfoToList(boosterList, "Fierce Invocation", "FRF", "98");
        addCardInfoToList(boosterList, "Fiery Hellhound", "ORI", "284");
        addCardInfoToList(boosterList, "Fiery Temper", "SOI", "156");
        addCardInfoToList(boosterList, "Fire Elemental", "M19", "141");
        addCardInfoToList(boosterList, "Firebolt", "DDS", "37");
        addCardInfoToList(boosterList, "Firebrand Archer", "HOU", "92");
        addCardInfoToList(boosterList, "Flametongue Kavu", "E01", "48");
        addCardInfoToList(boosterList, "Flamewave Invoker", "BBD", "178");
        addCardInfoToList(boosterList, "Fling", "AKH", "132");
        addCardInfoToList(boosterList, "Frenzied Raptor", "XLN", "146");
        addCardInfoToList(boosterList, "Frilled Deathspitter", "RIX", "104");
        addCardInfoToList(boosterList, "Frontline Devastator", "HOU", "93");
        addCardInfoToList(boosterList, "Fury Charm", "IMA", "130");
        addCardInfoToList(boosterList, "Genju of the Spires", "A25", "132");
        addCardInfoToList(boosterList, "Goblin Balloon Brigade", "CN2", "159");
        addCardInfoToList(boosterList, "Goblin Locksmith", "GRN", "104");
        addCardInfoToList(boosterList, "Goblin Motivator", "M19", "143");
        addCardInfoToList(boosterList, "Goblin Oriflamme", "MH1", "130");
        addCardInfoToList(boosterList, "Goblin Warchief", "DOM", "130");
        addCardInfoToList(boosterList, "Gut Shot", "MM2", "117");
        addCardInfoToList(boosterList, "Hanweir Lancer", "MM3", "97");
        addCardInfoToList(boosterList, "Hijack", "XLN", "148");
        addCardInfoToList(boosterList, "Hulking Devil", "SOI", "165");
        addCardInfoToList(boosterList, "Insolent Neonate", "SOI", "168");
        addCardInfoToList(boosterList, "Jackal Pup", "A25", "139");
        addCardInfoToList(boosterList, "Keldon Overseer", "DOM", "134");
        addCardInfoToList(boosterList, "Khenra Scrapper", "HOU", "100");
        addCardInfoToList(boosterList, "Kird Ape", "EMA", "137");
        addCardInfoToList(boosterList, "Kolaghan Stormsinger", "DTK", "145");
        addCardInfoToList(boosterList, "Krenko's Command", "DDT", "53");
        addCardInfoToList(boosterList, "Lightning Bolt", "E01", "54");
        addCardInfoToList(boosterList, "Lightning Javelin", "ORI", "153");
        addCardInfoToList(boosterList, "Lightning Shrieker", "FRF", "106");
        addCardInfoToList(boosterList, "Lightning Talons", "BBD", "180");
        addCardInfoToList(boosterList, "Magma Spray", "AKH", "141");
        addCardInfoToList(boosterList, "Makindi Sliderunner", "BFZ", "148");
        addCardInfoToList(boosterList, "Mark of Mutiny", "PCA", "47");
        addCardInfoToList(boosterList, "Mogg Fanatic", "DD1", "44");
        addCardInfoToList(boosterList, "Mogg War Marshal", "EMA", "139");
        addCardInfoToList(boosterList, "Mutiny", "RIX", "106");
        addCardInfoToList(boosterList, "Nimble-Blade Khenra", "AKH", "145");
        addCardInfoToList(boosterList, "Orcish Cannonade", "DDN", "28");
        addCardInfoToList(boosterList, "Orcish Oriflamme", "EMA", "140");
        addCardInfoToList(boosterList, "Pillage", "A25", "144");
        addCardInfoToList(boosterList, "Prickleboar", "ORI", "158");
        addCardInfoToList(boosterList, "Prophetic Ravings", "EMN", "139");
        addCardInfoToList(boosterList, "Rampaging Cyclops", "DOM", "139");
        addCardInfoToList(boosterList, "Renegade Tactics", "KLD", "127");
        addCardInfoToList(boosterList, "Roast", "DTK", "151");
        addCardInfoToList(boosterList, "Rolling Thunder", "BFZ", "154");
        addCardInfoToList(boosterList, "Rubblebelt Maaka", "MM3", "109");
        addCardInfoToList(boosterList, "Rummaging Goblin", "XLN", "160");
        addCardInfoToList(boosterList, "Run Amok", "DOM", "140");
        addCardInfoToList(boosterList, "Rush of Adrenaline", "SOI", "177");
        addCardInfoToList(boosterList, "Salivating Gremlins", "KLD", "129");
        addCardInfoToList(boosterList, "Seismic Stomp", "EMA", "146");
        addCardInfoToList(boosterList, "Shatter", "RIX", "114");
        addCardInfoToList(boosterList, "Shock", "M19", "156");
        addCardInfoToList(boosterList, "Skirk Commando", "A25", "150");
        addCardInfoToList(boosterList, "Skirk Prospector", "DOM", "144");
        addCardInfoToList(boosterList, "Smash to Smithereens", "MM2", "124");
        addCardInfoToList(boosterList, "Sparktongue Dragon", "M19", "159");
        addCardInfoToList(boosterList, "Spikeshot Goblin", "A25", "152");
        addCardInfoToList(boosterList, "Sulfurous Blast", "CMA", "88");
        addCardInfoToList(boosterList, "Summit Prowler", "DTK", "160");
        addCardInfoToList(boosterList, "Sun-Crowned Hunters", "XLN", "164");
        addCardInfoToList(boosterList, "Swashbuckling", "XLN", "167");
        addCardInfoToList(boosterList, "Sweatworks Brawler", "AER", "100");
        addCardInfoToList(boosterList, "Tarfire", "DDT", "55");
        addCardInfoToList(boosterList, "Thresher Lizard", "AKH", "150");
        addCardInfoToList(boosterList, "Uncaged Fury", "A25", "155");
        addCardInfoToList(boosterList, "Undying Rage", "EMA", "152");
        addCardInfoToList(boosterList, "Vent Sentinel", "IMA", "153");
        addCardInfoToList(boosterList, "Vessel of Volatility", "SOI", "189");
        addCardInfoToList(boosterList, "Voldaren Duelist", "SOI", "191");
        addCardInfoToList(boosterList, "Wildfire Emissary", "EMA", "153");
        addCardInfoToList(boosterList, "Young Pyromancer", "DDS", "20");
        addCardInfoToList(boosterList, "Zada's Commando", "OGW", "120");
        addCardInfoToList(boosterList, "Zealot of the God-Pharaoh", "HOU", "207");
    }
    /**
     * Populates the card pool for booster slot 9 ("green A").
     *
     * <p>Data-only method: one (card name, set code, collector number) entry
     * per line, delegated to {@code addCardInfoToList}. Entries stay in
     * alphabetical order by card name, consistent with the sibling methods.
     * NOTE(review): "Llanowar Elves" uses collector number 314 (presumably a
     * supplemental/planeswalker-deck printing in M19) — confirm intentional.
     *
     * @param boosterList the list that accumulates the slot's candidate cards
     */
    private void addSlot9greenA(List<CardInfo> boosterList) {
        addCardInfoToList(boosterList, "Affectionate Indrik", "GRN", "121");
        addCardInfoToList(boosterList, "Ancestral Mask", "EMA", "157");
        addCardInfoToList(boosterList, "Ancient Brontodon", "XLN", "175");
        addCardInfoToList(boosterList, "Arbor Armament", "DOM", "155");
        addCardInfoToList(boosterList, "Beastbreaker of Bala Ged", "DDP", "10");
        addCardInfoToList(boosterList, "Become Immense", "KTK", "130");
        addCardInfoToList(boosterList, "Blanchwood Armor", "M19", "169");
        addCardInfoToList(boosterList, "Blastoderm", "DDD", "7");
        addCardInfoToList(boosterList, "Borderland Explorer", "C18", "133");
        addCardInfoToList(boosterList, "Briarhorn", "DDR", "3");
        addCardInfoToList(boosterList, "Broodhunter Wurm", "BFZ", "171");
        addCardInfoToList(boosterList, "Byway Courier", "SOI", "196");
        addCardInfoToList(boosterList, "Centaur Courser", "M19", "171");
        addCardInfoToList(boosterList, "Creeping Mold", "KLD", "150");
        addCardInfoToList(boosterList, "Destructor Dragon", "FRF", "127");
        addCardInfoToList(boosterList, "Domesticated Hydra", "CN2", "63");
        addCardInfoToList(boosterList, "Dragon-Scarred Bear", "DTK", "183");
        addCardInfoToList(boosterList, "Elemental Uprising", "OGW", "130");
        addCardInfoToList(boosterList, "Elvish Fury", "MH1", "162");
        addCardInfoToList(boosterList, "Eternal Witness", "UMA", "163");
        addCardInfoToList(boosterList, "Feral Prowler", "HOU", "115");
        addCardInfoToList(boosterList, "Fierce Empath", "DDU", "10");
        addCardInfoToList(boosterList, "Frontier Mastodon", "FRF", "130");
        addCardInfoToList(boosterList, "Gaea's Blessing", "DOM", "161");
        addCardInfoToList(boosterList, "Gaea's Protector", "DOM", "162");
        addCardInfoToList(boosterList, "Gift of Growth", "DOM", "163");
        addCardInfoToList(boosterList, "Glade Watcher", "DTK", "188");
        addCardInfoToList(boosterList, "Grapple with the Past", "C18", "148");
        addCardInfoToList(boosterList, "Greater Basilisk", "IMA", "165");
        addCardInfoToList(boosterList, "Greater Sandwurm", "AKH", "168");
        addCardInfoToList(boosterList, "Hamlet Captain", "EMN", "161");
        addCardInfoToList(boosterList, "Hooded Brawler", "AKH", "173");
        addCardInfoToList(boosterList, "Hooting Mandrills", "KTK", "137");
        addCardInfoToList(boosterList, "Jungle Delver", "XLN", "195");
        addCardInfoToList(boosterList, "Jungle Wayfinder", "BBD", "72");
        addCardInfoToList(boosterList, "Kin-Tree Warden", "KTK", "139");
        addCardInfoToList(boosterList, "Kraul Foragers", "GRN", "135");
        addCardInfoToList(boosterList, "Krosan Druid", "DOM", "167");
        addCardInfoToList(boosterList, "Lead by Example", "BBD", "205");
        addCardInfoToList(boosterList, "Lead the Stampede", "DDU", "16");
        addCardInfoToList(boosterList, "Lifespring Druid", "BFZ", "177");
        addCardInfoToList(boosterList, "Lignify", "DDD", "16");
        addCardInfoToList(boosterList, "Llanowar Elves", "M19", "314");
        addCardInfoToList(boosterList, "Llanowar Empath", "DDU", "18");
        addCardInfoToList(boosterList, "Lure", "IMA", "175");
        addCardInfoToList(boosterList, "Mantle of Webs", "ORI", "187");
        addCardInfoToList(boosterList, "Map the Wastes", "FRF", "134");
        addCardInfoToList(boosterList, "Mulch", "CMA", "128");
        addCardInfoToList(boosterList, "Natural Connection", "DDR", "13");
        addCardInfoToList(boosterList, "Naturalize", "M19", "190");
        addCardInfoToList(boosterList, "Nature's Lore", "DDD", "17");
        addCardInfoToList(boosterList, "Nest Invader", "PCA", "69");
        addCardInfoToList(boosterList, "Nettle Sentinel", "A25", "182");
        addCardInfoToList(boosterList, "New Horizons", "XLN", "198");
        addCardInfoToList(boosterList, "Nimble Mongoose", "EMA", "179");
        addCardInfoToList(boosterList, "Ondu Giant", "PCA", "71");
        addCardInfoToList(boosterList, "Oran-Rief Invoker", "DDR", "17");
        addCardInfoToList(boosterList, "Overgrown Armasaur", "RIX", "141");
        addCardInfoToList(boosterList, "Pack's Favor", "GRN", "139");
        addCardInfoToList(boosterList, "Penumbra Spider", "MM3", "131");
        addCardInfoToList(boosterList, "Pierce the Sky", "DOM", "176");
        addCardInfoToList(boosterList, "Plummet", "RIX", "143");
        addCardInfoToList(boosterList, "Prey Upon", "GRN", "143");
        addCardInfoToList(boosterList, "Prey's Vengeance", "IMA", "182");
        addCardInfoToList(boosterList, "Pulse of Murasa", "OGW", "141");
        addCardInfoToList(boosterList, "Quiet Disrepair", "PCA", "75");
        addCardInfoToList(boosterList, "Rampant Growth", "DDS", "48");
        addCardInfoToList(boosterList, "Ranger's Guile", "M15", "193");
        addCardInfoToList(boosterList, "Ravenous Leucrocota", "CN2", "192");
        addCardInfoToList(boosterList, "Reclaim", "ORI", "195");
        addCardInfoToList(boosterList, "Revive", "MM3", "133");
        addCardInfoToList(boosterList, "Rhox Maulers", "ORI", "196");
        addCardInfoToList(boosterList, "Riparian Tiger", "KLD", "167");
        addCardInfoToList(boosterList, "Roar of the Wurm", "DDS", "49");
        addCardInfoToList(boosterList, "Root Out", "SOI", "224");
        addCardInfoToList(boosterList, "Rosethorn Halberd", "ELD", "175");
        addCardInfoToList(boosterList, "Runeclaw Bear", "M15", "197");
        addCardInfoToList(boosterList, "Sagu Archer", "KTK", "146");
        addCardInfoToList(boosterList, "Sakura-Tribe Elder", "C18", "160");
        addCardInfoToList(boosterList, "Saproling Migration", "DOM", "178");
        addCardInfoToList(boosterList, "Savage Punch", "KTK", "147");
        addCardInfoToList(boosterList, "Seal of Strength", "EMA", "184");
        addCardInfoToList(boosterList, "Search for Tomorrow", "IMA", "185");
        addCardInfoToList(boosterList, "Seek the Horizon", "DDR", "20");
        addCardInfoToList(boosterList, "Seek the Wilds", "BFZ", "189");
        addCardInfoToList(boosterList, "Shape the Sands", "DTK", "205");
        addCardInfoToList(boosterList, "Siege Wurm", "GRN", "144");
        addCardInfoToList(boosterList, "Silhana Ledgewalker", "PCA", "77");
        addCardInfoToList(boosterList, "Silkweaver Elite", "AER", "125");
        addCardInfoToList(boosterList, "Snake Umbra", "C18", "162");
        addCardInfoToList(boosterList, "Snapping Sailback", "XLN", "208");
        addCardInfoToList(boosterList, "Spider Spawning", "CMA", "149");
        addCardInfoToList(boosterList, "Stoic Builder", "SOI", "231");
        addCardInfoToList(boosterList, "Strength in Numbers", "MM3", "138");
        addCardInfoToList(boosterList, "Sylvan Bounty", "E01", "74");
        addCardInfoToList(boosterList, "Tajuru Pathwarden", "OGW", "145");
        addCardInfoToList(boosterList, "Take Down", "KLD", "170");
        addCardInfoToList(boosterList, "Talons of Wildwood", "M19", "202");
        addCardInfoToList(boosterList, "Territorial Baloth", "DDP", "24");
        addCardInfoToList(boosterList, "Thornhide Wolves", "M19", "204");
        addCardInfoToList(boosterList, "Thornweald Archer", "CMA", "154");
        addCardInfoToList(boosterList, "Thrive", "MM2", "166");
        addCardInfoToList(boosterList, "Timberwatch Elf", "EMA", "190");
        addCardInfoToList(boosterList, "Time to Feed", "DDO", "50");
        addCardInfoToList(boosterList, "Titanic Growth", "M19", "205");
        addCardInfoToList(boosterList, "Tukatongue Thallid", "PCA", "79");
        addCardInfoToList(boosterList, "Turntimber Basilisk", "E01", "76");
        addCardInfoToList(boosterList, "Vastwood Gorger", "ORI", "204");
        addCardInfoToList(boosterList, "Watcher in the Web", "SOI", "239");
        addCardInfoToList(boosterList, "Wellwisher", "CMA", "166");
        addCardInfoToList(boosterList, "Wild Growth", "C18", "165");
        addCardInfoToList(boosterList, "Wild Mongrel", "DDD", "5");
        addCardInfoToList(boosterList, "Wildsize", "IMA", "191");
        addCardInfoToList(boosterList, "Wolfkin Bond", "EMN", "178");
        addCardInfoToList(boosterList, "Woodborn Behemoth", "E01", "79");
        addCardInfoToList(boosterList, "Woolly Loxodon", "KTK", "158");
        addCardInfoToList(boosterList, "Wren's Run Vanquisher", "DD1", "19");
        addCardInfoToList(boosterList, "Yavimaya Elder", "C18", "166");
        addCardInfoToList(boosterList, "Yavimaya Sapherd", "DOM", "189");
        addCardInfoToList(boosterList, "Yeva's Forcemage", "ORI", "208");
        addCardInfoToList(boosterList, "Zendikar's Roil", "ORI", "209");
    }
    /**
     * Fills {@code boosterList} with the candidate card pool for booster slot 10,
     * green list "B". Each entry is (card name, set code, collector number);
     * the exact literals identify specific printings and must not be altered.
     * Presumably the caller later picks one card at random from this pool for
     * the slot — TODO(review): confirm against the booster-assembly code.
     */
    private void addSlot10greenB(List<CardInfo> boosterList) {
        addCardInfoToList(boosterList, "Abundant Growth", "EMA", "156");
        addCardInfoToList(boosterList, "Acidic Slime", "C18", "127");
        addCardInfoToList(boosterList, "Adventurous Impulse", "DOM", "153");
        addCardInfoToList(boosterList, "Aerie Bowmasters", "DTK", "170");
        addCardInfoToList(boosterList, "Aggressive Instinct", "GS1", "34");
        addCardInfoToList(boosterList, "Aggressive Urge", "RIX", "122");
        addCardInfoToList(boosterList, "Ainok Survivalist", "DTK", "172");
        addCardInfoToList(boosterList, "Alpine Grizzly", "KTK", "127");
        addCardInfoToList(boosterList, "Ambassador Oak", "A25", "158");
        addCardInfoToList(boosterList, "Ancient Stirrings", "A25", "159");
        addCardInfoToList(boosterList, "Arachnus Web", "MM3", "118");
        addCardInfoToList(boosterList, "Arbor Elf", "A25", "160");
        addCardInfoToList(boosterList, "Aura Gnarlid", "C18", "128");
        addCardInfoToList(boosterList, "Avacyn's Pilgrim", "MM3", "119");
        addCardInfoToList(boosterList, "Backwoods Survivalists", "EMN", "150");
        addCardInfoToList(boosterList, "Baloth Gorger", "DOM", "156");
        addCardInfoToList(boosterList, "Basking Rootwalla", "DDD", "2");
        addCardInfoToList(boosterList, "Beast Within", "BBD", "190");
        addCardInfoToList(boosterList, "Beneath the Sands", "HOU", "111");
        addCardInfoToList(boosterList, "Bestial Menace", "MM2", "141");
        addCardInfoToList(boosterList, "Bitterblade Warrior", "AKH", "157");
        addCardInfoToList(boosterList, "Bitterbow Sharpshooters", "HOU", "112");
        addCardInfoToList(boosterList, "Blossom Dryad", "XLN", "178");
        addCardInfoToList(boosterList, "Borderland Ranger", "E02", "31");
        addCardInfoToList(boosterList, "Bristling Boar", "M19", "170");
        addCardInfoToList(boosterList, "Broken Bond", "DOM", "157");
        addCardInfoToList(boosterList, "Canopy Spider", "BBD", "191");
        addCardInfoToList(boosterList, "Carnivorous Moss-Beast", "M15", "170");
        addCardInfoToList(boosterList, "Caustic Caterpillar", "ORI", "170");
        addCardInfoToList(boosterList, "Charging Rhino", "BBD", "192");
        addCardInfoToList(boosterList, "Citanul Woodreaders", "DDR", "4");
        addCardInfoToList(boosterList, "Clip Wings", "SOI", "197");
        addCardInfoToList(boosterList, "Colossal Dreadmaw", "XLN", "180");
        addCardInfoToList(boosterList, "Combo Attack", "BBD", "67");
        addCardInfoToList(boosterList, "Commune with Nature", "MM2", "142");
        addCardInfoToList(boosterList, "Commune with the Gods", "EMA", "162");
        addCardInfoToList(boosterList, "Conifer Strider", "DTK", "179");
        addCardInfoToList(boosterList, "Crop Rotation", "DDR", "7");
        addCardInfoToList(boosterList, "Crossroads Consecrator", "EMN", "154");
        addCardInfoToList(boosterList, "Crowned Ceratok", "IMA", "158");
        addCardInfoToList(boosterList, "Crushing Canopy", "GRN", "126");
        addCardInfoToList(boosterList, "Cultivate", "C18", "138");
        addCardInfoToList(boosterList, "Daggerback Basilisk", "M19", "174");
        addCardInfoToList(boosterList, "Dawn's Reflection", "C18", "139");
        addCardInfoToList(boosterList, "Death-Hood Cobra", "MM3", "123");
        addCardInfoToList(boosterList, "Desert Twister", "CMA", "100");
        addCardInfoToList(boosterList, "Dissenter's Deliverance", "AKH", "164");
        addCardInfoToList(boosterList, "Dragonscale Boon", "KTK", "131");
        addCardInfoToList(boosterList, "Durkwood Baloth", "IMA", "160");
        addCardInfoToList(boosterList, "Earthen Arms", "BFZ", "172");
        addCardInfoToList(boosterList, "Elephant Guide", "EMA", "163");
        addCardInfoToList(boosterList, "Elves of Deep Shadow", "GK1", "56");
        addCardInfoToList(boosterList, "Elvish Visionary", "BBD", "196");
        addCardInfoToList(boosterList, "Elvish Warrior", "DD1", "5");
        addCardInfoToList(boosterList, "Ember Weaver", "A25", "169");
        addCardInfoToList(boosterList, "Epic Confrontation", "DTK", "185");
        addCardInfoToList(boosterList, "Essence Warden", "CMA", "106");
        addCardInfoToList(boosterList, "Experiment One", "C15", "184");
        addCardInfoToList(boosterList, "Explore", "DDO", "45");
        addCardInfoToList(boosterList, "Explosive Vegetation", "C18", "144");
        addCardInfoToList(boosterList, "Ezuri's Archers", "DDU", "9");
        addCardInfoToList(boosterList, "Fade into Antiquity", "CN2", "181");
        addCardInfoToList(boosterList, "Farseek", "C17", "149");
        addCardInfoToList(boosterList, "Feed the Clan", "KTK", "132");
        addCardInfoToList(boosterList, "Feral Krushok", "FRF", "128");
        addCardInfoToList(boosterList, "Ferocious Zheng", "GS1", "28");
        addCardInfoToList(boosterList, "Fertile Ground", "C18", "147");
        addCardInfoToList(boosterList, "Fog", "EMA", "167");
        addCardInfoToList(boosterList, "Formless Nurturing", "FRF", "129");
        addCardInfoToList(boosterList, "Giant Growth", "BBD", "200");
        addCardInfoToList(boosterList, "Giant Spider", "AKH", "166");
        addCardInfoToList(boosterList, "Gift of Paradise", "AKH", "167");
        addCardInfoToList(boosterList, "Gnarlid Pack", "MM2", "144");
        addCardInfoToList(boosterList, "Grazing Gladehart", "DDP", "14");
        addCardInfoToList(boosterList, "Greenwood Sentinel", "M19", "187");
        addCardInfoToList(boosterList, "Groundswell", "DDP", "15");
        addCardInfoToList(boosterList, "Guardian Shield-Bearer", "DTK", "189");
        addCardInfoToList(boosterList, "Hardy Veteran", "RIX", "132");
        addCardInfoToList(boosterList, "Harmonize", "C17", "151");
        addCardInfoToList(boosterList, "Harrow", "C18", "150");
        addCardInfoToList(boosterList, "Hunt the Weak", "RIX", "133");
        addCardInfoToList(boosterList, "Hunter's Ambush", "M15", "180");
        addCardInfoToList(boosterList, "Imperious Perfect", "CMA", "118");
        addCardInfoToList(boosterList, "Invigorate", "A25", "173");
        addCardInfoToList(boosterList, "Ivy Lane Denizen", "DDU", "12");
        addCardInfoToList(boosterList, "Kavu Climber", "A25", "175");
        addCardInfoToList(boosterList, "Kavu Primarch", "MM2", "146");
        addCardInfoToList(boosterList, "Khalni Heart Expedition", "C18", "154");
        addCardInfoToList(boosterList, "Kozilek's Predator", "MM2", "147");
        addCardInfoToList(boosterList, "Kraul Warrior", "BBD", "204");
        addCardInfoToList(boosterList, "Krosan Tusker", "DDU", "14");
        addCardInfoToList(boosterList, "Larger Than Life", "KLD", "160");
        addCardInfoToList(boosterList, "Lay of the Land", "CN2", "185");
        addCardInfoToList(boosterList, "Longshot Squad", "KTK", "140");
        addCardInfoToList(boosterList, "Manglehorn", "AKH", "175");
        addCardInfoToList(boosterList, "Might of the Masses", "ORI", "188");
        addCardInfoToList(boosterList, "Nature's Claim", "IMA", "177");
        addCardInfoToList(boosterList, "Oakgnarl Warrior", "DDR", "15");
        addCardInfoToList(boosterList, "Overgrown Battlement", "IMA", "180");
        addCardInfoToList(boosterList, "Overrun", "CMA", "130");
        addCardInfoToList(boosterList, "Peema Outrider", "KLD", "166");
        addCardInfoToList(boosterList, "Pelakka Wurm", "MM2", "154");
        addCardInfoToList(boosterList, "Pinion Feast", "DTK", "195");
        addCardInfoToList(boosterList, "Pouncing Cheetah", "AKH", "179");
        addCardInfoToList(boosterList, "Priest of Titania", "C14", "210");
        addCardInfoToList(boosterList, "Rain of Thorns", "C17", "156");
        addCardInfoToList(boosterList, "Rancor", "A25", "186");
        addCardInfoToList(boosterList, "Reclaiming Vines", "BFZ", "185");
        addCardInfoToList(boosterList, "Regrowth", "A25", "187");
        addCardInfoToList(boosterList, "Relic Crush", "CM2", "142");
        addCardInfoToList(boosterList, "Return to the Earth", "BBD", "210");
        addCardInfoToList(boosterList, "Roots", "EMA", "183");
        addCardInfoToList(boosterList, "Scatter the Seeds", "GK1", "106");
        addCardInfoToList(boosterList, "Stalking Tiger", "W17", "28");
        addCardInfoToList(boosterList, "Sylvan Scrying", "BFZ", "192");
        addCardInfoToList(boosterList, "Tajuru Warcaller", "BFZ", "195");
        addCardInfoToList(boosterList, "Terrain Elemental", "KLD", "272");
        addCardInfoToList(boosterList, "The Crowd Goes Wild", "BBD", "68");
        addCardInfoToList(boosterList, "Thornscape Battlemage", "MM3", "142");
        addCardInfoToList(boosterList, "Thrashing Brontodon", "RIX", "148");
        addCardInfoToList(boosterList, "Venom Sliver", "M15", "205");
    }
    /**
     * Fills {@code boosterList} with the candidate card pool for booster slot 11
     * (multicolored cards). Each entry is (card name, set code, collector number);
     * the literals pin exact printings and must be kept byte-for-byte.
     */
    private void addSlot11multicolored(List<CardInfo> boosterList) {
        addCardInfoToList(boosterList, "Abzan Charm", "C16", "177");
        addCardInfoToList(boosterList, "Abzan Guide", "KTK", "162");
        addCardInfoToList(boosterList, "Agony Warp", "MM3", "150");
        addCardInfoToList(boosterList, "Akroan Hoplite", "CN2", "197");
        addCardInfoToList(boosterList, "Armadillo Cloak", "EMA", "195");
        addCardInfoToList(boosterList, "Armament Corps", "KTK", "165");
        addCardInfoToList(boosterList, "Azorius Charm", "IMA", "192");
        addCardInfoToList(boosterList, "Azra Oddsmaker", "BBD", "75");
        addCardInfoToList(boosterList, "Baleful Strix", "PCA", "82");
        addCardInfoToList(boosterList, "Baloth Null", "A25", "197");
        addCardInfoToList(boosterList, "Bear's Companion", "KTK", "167");
        addCardInfoToList(boosterList, "Belligerent Brontodon", "XLN", "218");
        addCardInfoToList(boosterList, "Bituminous Blast", "PCA", "83");
        addCardInfoToList(boosterList, "Bladewing the Risen", "IMA", "193");
        addCardInfoToList(boosterList, "Blightning", "A25", "198");
        addCardInfoToList(boosterList, "Bloodbraid Elf", "PCA", "84");
        addCardInfoToList(boosterList, "Boros Challenger", "GRN", "156");
        addCardInfoToList(boosterList, "Bounding Krasis", "ORI", "212");
        addCardInfoToList(boosterList, "Call of the Nightwing", "GK1", "8");
        addCardInfoToList(boosterList, "Campaign of Vengeance", "EMN", "182");
        addCardInfoToList(boosterList, "Cauldron Dance", "C17", "166");
        addCardInfoToList(boosterList, "Citadel Castellan", "ORI", "213");
        addCardInfoToList(boosterList, "Claim // Fame", "HOU", "150");
        addCardInfoToList(boosterList, "Coiling Oracle", "MM3", "157");
        addCardInfoToList(boosterList, "Contraband Kingpin", "KLD", "177");
        addCardInfoToList(boosterList, "Corpsejack Menace", "IMA", "197");
        addCardInfoToList(boosterList, "Crosis's Charm", "C17", "169");
        addCardInfoToList(boosterList, "Cunning Breezedancer", "DTK", "215");
        addCardInfoToList(boosterList, "Deathreap Ritual", "C18", "174");
        addCardInfoToList(boosterList, "Deny Reality", "PCA", "85");
        addCardInfoToList(boosterList, "Draconic Disciple", "M19", "215");
        addCardInfoToList(boosterList, "Drana's Emissary", "BFZ", "210");
        addCardInfoToList(boosterList, "Engineered Might", "KLD", "181");
        addCardInfoToList(boosterList, "Esper Charm", "C18", "179");
        addCardInfoToList(boosterList, "Ethercaste Knight", "MM2", "175");
        addCardInfoToList(boosterList, "Ethereal Ambush", "FRF", "152");
        addCardInfoToList(boosterList, "Extract from Darkness", "E01", "84");
        addCardInfoToList(boosterList, "Fire // Ice", "UMA", "225");
        addCardInfoToList(boosterList, "Fires of Yavimaya", "PCA", "92");
        addCardInfoToList(boosterList, "Flame-Kin Zealot", "EMA", "201");
        addCardInfoToList(boosterList, "Fusion Elemental", "PCA", "93");
        addCardInfoToList(boosterList, "Gelectrode", "GK1", "38");
        addCardInfoToList(boosterList, "Ghor-Clan Rampager", "MM3", "165");
        addCardInfoToList(boosterList, "Giantbaiting", "MM3", "208");
        addCardInfoToList(boosterList, "Gift of Orzhova", "MM3", "209");
        addCardInfoToList(boosterList, "Goblin Deathraiders", "DDN", "6");
        addCardInfoToList(boosterList, "Grim Contest", "FRF", "153");
        addCardInfoToList(boosterList, "Gwyllion Hedge-Mage", "CMA", "201");
        addCardInfoToList(boosterList, "Hammer Dropper", "GRN", "176");
        addCardInfoToList(boosterList, "Hidden Stockpile", "AER", "129");
        addCardInfoToList(boosterList, "Highspire Mantis", "KTK", "177");
        addCardInfoToList(boosterList, "Hypothesizzle", "GRN", "178");
        addCardInfoToList(boosterList, "Iroas's Champion", "ORI", "214");
        addCardInfoToList(boosterList, "Join Shields", "GRN", "181");
        addCardInfoToList(boosterList, "Jungle Barrier", "E02", "38");
        addCardInfoToList(boosterList, "Kathari Remnant", "PCA", "98");
        addCardInfoToList(boosterList, "Kin-Tree Invocation", "KTK", "183");
        addCardInfoToList(boosterList, "Kiora's Follower", "DDO", "52");
        addCardInfoToList(boosterList, "Kiss of the Amesha", "BBD", "225");
        addCardInfoToList(boosterList, "Lawmage's Binding", "RNA", "190");
        addCardInfoToList(boosterList, "Lightning Helix", "GK1", "90");
        addCardInfoToList(boosterList, "Mardu Roughrider", "KTK", "187");
        addCardInfoToList(boosterList, "Martial Glory", "GK1", "91");
        addCardInfoToList(boosterList, "Maverick Thopterist", "C18", "185");
        addCardInfoToList(boosterList, "Mercurial Geists", "EMN", "186");
        addCardInfoToList(boosterList, "Migratory Route", "CM2", "161");
        addCardInfoToList(boosterList, "Mistmeadow Witch", "CMA", "203");
        addCardInfoToList(boosterList, "Mortify", "C18", "186");
        addCardInfoToList(boosterList, "Naya Charm", "C16", "214");
        addCardInfoToList(boosterList, "Nucklavee", "DDS", "26");
        addCardInfoToList(boosterList, "Obelisk Spider", "HOU", "141");
        addCardInfoToList(boosterList, "Ochran Assassin", "GRN", "194");
        addCardInfoToList(boosterList, "Pillory of the Sleepless", "A25", "213");
        addCardInfoToList(boosterList, "Plaxcaster Frogling", "MM2", "184");
        addCardInfoToList(boosterList, "Pollenbright Wings", "GK1", "115");
        addCardInfoToList(boosterList, "Putrefy", "GK1", "68");
        addCardInfoToList(boosterList, "Qasali Pridemage", "C17", "189");
        addCardInfoToList(boosterList, "Raff Capashen, Ship's Mage", "DOM", "202");
        addCardInfoToList(boosterList, "Raging Swordtooth", "XLN", "226");
        addCardInfoToList(boosterList, "Reclusive Artificer", "DDU", "51");
        addCardInfoToList(boosterList, "Reflector Mage", "OGW", "157");
        addCardInfoToList(boosterList, "Rhox War Monk", "MM3", "180");
        addCardInfoToList(boosterList, "Riptide Crab", "BBD", "228");
        addCardInfoToList(boosterList, "River Hoopoe", "HOU", "143");
        addCardInfoToList(boosterList, "Rosemane Centaur", "GRN", "197");
        addCardInfoToList(boosterList, "Rosheen Meanderer", "IMA", "206");
        addCardInfoToList(boosterList, "Satyr Enchanter", "M19", "223");
        addCardInfoToList(boosterList, "Savage Twister", "C18", "190");
        addCardInfoToList(boosterList, "Sedraxis Specter", "MM3", "181");
        addCardInfoToList(boosterList, "Selesnya Guildmage", "GK1", "119");
        addCardInfoToList(boosterList, "Shambling Remains", "DDN", "12");
        addCardInfoToList(boosterList, "Shardless Agent", "PCA", "104");
        addCardInfoToList(boosterList, "Shipwreck Singer", "CN2", "206");
        addCardInfoToList(boosterList, "Shrewd Hatchling", "MM2", "198");
        addCardInfoToList(boosterList, "Skyward Eye Prophets", "CMA", "193");
        addCardInfoToList(boosterList, "Slave of Bolas", "E01", "86");
        addCardInfoToList(boosterList, "Soul Manipulation", "MM3", "185");
        addCardInfoToList(boosterList, "Sprouting Thrinax", "MM3", "189");
        addCardInfoToList(boosterList, "Stormchaser Chimera", "CN2", "207");
        addCardInfoToList(boosterList, "Sultai Charm", "KTK", "204");
        addCardInfoToList(boosterList, "Sultai Soothsayer", "KTK", "205");
        addCardInfoToList(boosterList, "Tatyova, Benthic Druid", "DOM", "206");
        addCardInfoToList(boosterList, "Terminate", "CMA", "195");
        addCardInfoToList(boosterList, "Thopter Foundry", "C16", "237");
        addCardInfoToList(boosterList, "Thought Erasure", "GRN", "206");
        addCardInfoToList(boosterList, "Tithe Drinker", "C17", "200");
        addCardInfoToList(boosterList, "Tower Gargoyle", "MM3", "196");
        addCardInfoToList(boosterList, "Treacherous Terrain", "C16", "47");
        addCardInfoToList(boosterList, "Underworld Coinsmith", "C15", "237");
        addCardInfoToList(boosterList, "Unflinching Courage", "C18", "192");
        addCardInfoToList(boosterList, "Unlicensed Disintegration", "KLD", "187");
        addCardInfoToList(boosterList, "Urban Evolution", "MM3", "198");
        addCardInfoToList(boosterList, "Vengeful Rebirth", "MM2", "188");
        addCardInfoToList(boosterList, "Warden of the Eye", "KTK", "212");
        addCardInfoToList(boosterList, "Wayfaring Temple", "MM3", "202");
        addCardInfoToList(boosterList, "Weapons Trainer", "OGW", "160");
        addCardInfoToList(boosterList, "Wee Dragonauts", "GRN", "214");
        addCardInfoToList(boosterList, "Winding Constrictor", "AER", "140");
        addCardInfoToList(boosterList, "Woolly Thoctar", "MM3", "203");
        addCardInfoToList(boosterList, "Zealous Persecution", "E02", "41");
        addCardInfoToList(boosterList, "Zhur-Taa Druid", "C16", "232");
    }
    /**
     * Fills {@code boosterList} with the candidate card pool for booster slot 12
     * (colorless cards: artifacts and nonbasic lands). Each entry is
     * (card name, set code, collector number); do not edit the literals —
     * they select specific printings.
     */
    private void addSlot12colorless(List<CardInfo> boosterList) {
        addCardInfoToList(boosterList, "Aether Hub", "KLD", "242");
        addCardInfoToList(boosterList, "Aether Spellbomb", "MMA", "196");
        addCardInfoToList(boosterList, "Akoum Refuge", "CMA", "238");
        addCardInfoToList(boosterList, "Alchemist's Vial", "ORI", "220");
        addCardInfoToList(boosterList, "Alloy Myr", "MM2", "201");
        addCardInfoToList(boosterList, "Arcane Sanctum", "C18", "232");
        addCardInfoToList(boosterList, "Armillary Sphere", "C17", "203");
        addCardInfoToList(boosterList, "Artisan of Kozilek", "CM2", "14");
        addCardInfoToList(boosterList, "Ash Barrens", "CM2", "235");
        addCardInfoToList(boosterList, "Ashnod's Altar", "EMA", "218");
        addCardInfoToList(boosterList, "Benthic Infiltrator", "BFZ", "55");
        addCardInfoToList(boosterList, "Blasted Landscape", "CM2", "238");
        addCardInfoToList(boosterList, "Blighted Fen", "BFZ", "230");
        addCardInfoToList(boosterList, "Blinding Souleater", "MM2", "202");
        addCardInfoToList(boosterList, "Blossoming Sands", "C18", "237");
        addCardInfoToList(boosterList, "Bojuka Bog", "C18", "238");
        addCardInfoToList(boosterList, "Bomat Bazaar Barge", "KLD", "198");
        addCardInfoToList(boosterList, "Bone Saw", "OGW", "161");
        addCardInfoToList(boosterList, "Bottle Gnomes", "CM2", "177");
        addCardInfoToList(boosterList, "Breaker of Armies", "BFZ", "3");
        addCardInfoToList(boosterList, "Burnished Hart", "C15", "248");
        addCardInfoToList(boosterList, "Call the Scions", "BFZ", "165");
        addCardInfoToList(boosterList, "Cathodion", "CM2", "179");
        addCardInfoToList(boosterList, "Coldsteel Heart", "CM2", "181");
        addCardInfoToList(boosterList, "Consulate Dreadnought", "AER", "146");
        addCardInfoToList(boosterList, "Copper Carapace", "MM2", "205");
        addCardInfoToList(boosterList, "Crumbling Necropolis", "C17", "244");
        addCardInfoToList(boosterList, "Crystal Ball", "C18", "201");
        addCardInfoToList(boosterList, "Crystal Chimes", "C15", "250");
        addCardInfoToList(boosterList, "Darksteel Citadel", "MM2", "238");
        addCardInfoToList(boosterList, "Diamond Mare", "M19", "231");
        addCardInfoToList(boosterList, "Dismal Backwater", "KTK", "232");
        addCardInfoToList(boosterList, "Dreadship Reef", "CM2", "247");
        addCardInfoToList(boosterList, "Eldrazi Devastator", "BFZ", "7");
        addCardInfoToList(boosterList, "Emmessi Tome", "EMA", "221");
        addCardInfoToList(boosterList, "Etched Oracle", "C16", "252");
        addCardInfoToList(boosterList, "Evolving Wilds", "C18", "245");
        addCardInfoToList(boosterList, "Faerie Conclave", "CMA", "248");
        addCardInfoToList(boosterList, "Farmstead Gleaner", "MH1", "222");
        addCardInfoToList(boosterList, "Field of Ruin", "XLN", "254");
        addCardInfoToList(boosterList, "Filigree Familiar", "GNT", "52");
        addCardInfoToList(boosterList, "Flayer Husk", "PCA", "110");
        addCardInfoToList(boosterList, "Forgotten Cave", "C18", "246");
        addCardInfoToList(boosterList, "Foundry Inspector", "KLD", "215");
        addCardInfoToList(boosterList, "Fountain of Renewal", "M19", "235");
        addCardInfoToList(boosterList, "Frogmite", "MM2", "215");
        addCardInfoToList(boosterList, "Frontier Bivouac", "C17", "251");
        addCardInfoToList(boosterList, "Gateway Plaza", "GRN", "247");
        addCardInfoToList(boosterList, "Ghost Quarter", "CM2", "253");
        addCardInfoToList(boosterList, "Goblin Burrows", "DD1", "58");
        addCardInfoToList(boosterList, "Graypelt Refuge", "C17", "253");
        addCardInfoToList(boosterList, "Great Furnace", "C18", "250");
        addCardInfoToList(boosterList, "Gruul Signet", "C16", "256");
        addCardInfoToList(boosterList, "Guardians of Meletis", "ORI", "228");
        addCardInfoToList(boosterList, "Heavy Arbalest", "A25", "225");
        addCardInfoToList(boosterList, "Herald's Horn", "C17", "53");
        addCardInfoToList(boosterList, "Hexplate Golem", "BBD", "237");
        addCardInfoToList(boosterList, "Hot Soup", "M15", "219");
        addCardInfoToList(boosterList, "Icy Manipulator", "DOM", "219");
        addCardInfoToList(boosterList, "Implement of Malice", "AER", "159");
        addCardInfoToList(boosterList, "Irontread Crusher", "AER", "161");
        addCardInfoToList(boosterList, "Juggernaut", "BBD", "238");
        addCardInfoToList(boosterList, "Jungle Hollow", "KTK", "235");
        addCardInfoToList(boosterList, "Jungle Shrine", "C17", "257");
        addCardInfoToList(boosterList, "Kazandu Refuge", "C18", "261");
        addCardInfoToList(boosterList, "Krosan Verge", "C18", "263");
        addCardInfoToList(boosterList, "Lightning Greaves", "C19", "217");
        addCardInfoToList(boosterList, "Loxodon Warhammer", "C17", "216");
        addCardInfoToList(boosterList, "Mask of Memory", "C14", "249");
        addCardInfoToList(boosterList, "Meteorite", "ORI", "233");
        addCardInfoToList(boosterList, "Millikin", "EMA", "226");
        addCardInfoToList(boosterList, "Millstone", "M19", "242");
        addCardInfoToList(boosterList, "Mind Stone", "C18", "210");
        addCardInfoToList(boosterList, "Mishra's Bauble", "IMA", "221");
        addCardInfoToList(boosterList, "Mishra's Factory", "A25", "242");
        addCardInfoToList(boosterList, "Moonglove Extract", "IMA", "222");
        addCardInfoToList(boosterList, "Mortarpod", "MM2", "222");
        addCardInfoToList(boosterList, "Myr Retriever", "CM2", "203");
        addCardInfoToList(boosterList, "Myr Sire", "CM2", "204");
        addCardInfoToList(boosterList, "New Benalia", "C18", "270");
        addCardInfoToList(boosterList, "Ornithopter", "M15", "223");
        addCardInfoToList(boosterList, "Orzhov Basilica", "C17", "268");
        addCardInfoToList(boosterList, "Palladium Myr", "CM2", "207");
        addCardInfoToList(boosterList, "Peace Strider", "BBD", "243");
        addCardInfoToList(boosterList, "Perilous Myr", "A25", "227");
        addCardInfoToList(boosterList, "Pilgrim's Eye", "GNT", "55");
        addCardInfoToList(boosterList, "Prophetic Prism", "A25", "229");
        addCardInfoToList(boosterList, "Reliquary Tower", "M19", "254");
        addCardInfoToList(boosterList, "Renegade Map", "AER", "173");
        addCardInfoToList(boosterList, "Rhonas's Monument", "AKH", "236");
        addCardInfoToList(boosterList, "Rogue's Passage", "C17", "272");
        addCardInfoToList(boosterList, "Sandsteppe Citadel", "CM2", "264");
        addCardInfoToList(boosterList, "Sandstone Oracle", "CM2", "213");
        addCardInfoToList(boosterList, "Scoured Barrens", "C18", "276");
        addCardInfoToList(boosterList, "Sejiri Refuge", "C18", "280");
        addCardInfoToList(boosterList, "Serrated Arrows", "DDD", "20");
        addCardInfoToList(boosterList, "Short Sword", "DOM", "229");
        addCardInfoToList(boosterList, "Sigil of Valor", "ORI", "239");
        addCardInfoToList(boosterList, "Simic Locket", "RNA", "240");
        addCardInfoToList(boosterList, "Skarrg, the Rage Pits", "PCA", "127");
        addCardInfoToList(boosterList, "Skullclamp", "C17", "222");
        addCardInfoToList(boosterList, "Skyscanner", "M19", "245");
        addCardInfoToList(boosterList, "Sol Ring", "C18", "222");
        addCardInfoToList(boosterList, "Sorcerer's Broom", "ELD", "232");
        // Intentionally excluded until the card is implemented in the engine:
        //addCardInfoToList(boosterList, "Spy Kit", "CN2", "79"); // not yet implemented
        addCardInfoToList(boosterList, "Sunset Pyramid", "HOU", "166");
        addCardInfoToList(boosterList, "Suspicious Bookcase", "M19", "246");
        addCardInfoToList(boosterList, "Swiftwater Cliffs", "C18", "284");
        addCardInfoToList(boosterList, "Tectonic Edge", "C14", "313");
        addCardInfoToList(boosterList, "Temple of the False God", "C18", "285");
        addCardInfoToList(boosterList, "Thornwood Falls", "C18", "287");
        addCardInfoToList(boosterList, "Thought Vessel", "CM2", "224");
        addCardInfoToList(boosterList, "Thran Dynamo", "IMA", "230");
        addCardInfoToList(boosterList, "Thran Golem", "PCA", "114");
        addCardInfoToList(boosterList, "Tormod's Crypt", "C14", "278");
        addCardInfoToList(boosterList, "Trepanation Blade", "IMA", "231");
        addCardInfoToList(boosterList, "Unclaimed Territory", "XLN", "258");
        addCardInfoToList(boosterList, "Universal Automaton", "MH1", "235");
        addCardInfoToList(boosterList, "Universal Solvent", "AER", "178");
        addCardInfoToList(boosterList, "Whispersilk Cloak", "PCA", "115");
        addCardInfoToList(boosterList, "Wirewood Lodge", "DD1", "26");
    }
    /**
     * Fills {@code boosterList} with the candidate card pool for booster slot 13
     * ("old frame" printings — cards drawn from older sets). Each entry is
     * (card name, set code, collector number); literals pin exact printings
     * and must not be changed.
     */
    private void addSlot13oldFrame(List<CardInfo> boosterList) {
        addCardInfoToList(boosterList, "Ana Sanctuary", "APC", "74");
        addCardInfoToList(boosterList, "Ancient Den", "MRD", "278");
        addCardInfoToList(boosterList, "Ancient Ziggurat", "CON", "141");
        addCardInfoToList(boosterList, "Angelic Destiny", "M12", "3");
        addCardInfoToList(boosterList, "Archangel", "C13", "5");
        addCardInfoToList(boosterList, "Asceticism", "SOM", "110");
        addCardInfoToList(boosterList, "Assemble the Legion", "GTC", "142");
        addCardInfoToList(boosterList, "Athreos, God of Passage", "JOU", "146");
        addCardInfoToList(boosterList, "Aura Shards", "CMD", "182");
        addCardInfoToList(boosterList, "Avalanche Riders", "ULG", "74");
        addCardInfoToList(boosterList, "Bear Cub", "P02", "123");
        addCardInfoToList(boosterList, "Belbe's Portal", "NEM", "127");
        addCardInfoToList(boosterList, "Black Knight", "M10", "85");
        addCardInfoToList(boosterList, "Bloom Tender", "EVE", "66");
        addCardInfoToList(boosterList, "Bonesplitter", "MRD", "146");
        addCardInfoToList(boosterList, "Bow of Nylea", "THS", "153");
        addCardInfoToList(boosterList, "Brimstone Dragon", "P02", "92");
        addCardInfoToList(boosterList, "Brimstone Mage", "ROE", "137");
        addCardInfoToList(boosterList, "Cairn Wanderer", "LRW", "105");
        addCardInfoToList(boosterList, "Carpet of Flowers", "USG", "240");
        addCardInfoToList(boosterList, "Centaur Glade", "ONS", "251");
        addCardInfoToList(boosterList, "Chancellor of the Annex", "NPH", "6");
        addCardInfoToList(boosterList, "Chatter of the Squirrel", "ODY", "233");
        addCardInfoToList(boosterList, "Chromatic Star", "TSP", "251");
        addCardInfoToList(boosterList, "Contagion Clasp", "SOM", "144");
        addCardInfoToList(boosterList, "Corrupted Conscience", "MBS", "22");
        addCardInfoToList(boosterList, "Cragganwick Cremator", "SHM", "87");
        addCardInfoToList(boosterList, "Crenellated Wall", "MMQ", "290");
        addCardInfoToList(boosterList, "Crystal Shard", "MRD", "159");
        addCardInfoToList(boosterList, "Darksteel Garrison", "FUT", "167");
        addCardInfoToList(boosterList, "Dauthi Mindripper", "TMP", "125");
        addCardInfoToList(boosterList, "Defense of the Heart", "ULG", "100");
        addCardInfoToList(boosterList, "Dictate of Erebos", "JOU", "65");
        addCardInfoToList(boosterList, "Dolmen Gate", "LRW", "256");
        addCardInfoToList(boosterList, "Dominus of Fealty", "CMD", "194");
        addCardInfoToList(boosterList, "Doomgape", "DDJ", "65");
        addCardInfoToList(boosterList, "Draco", "PLS", "131");
        addCardInfoToList(boosterList, "Dragon Broodmother", "ARB", "53");
        addCardInfoToList(boosterList, "Dragon Mask", "VIS", "144");
        addCardInfoToList(boosterList, "Dungrove Elder", "M12", "171");
        addCardInfoToList(boosterList, "Eater of Days", "DST", "120");
        addCardInfoToList(boosterList, "Elixir of Immortality", "M11", "206");
        addCardInfoToList(boosterList, "Empyrial Armor", "WTH", "13");
        addCardInfoToList(boosterList, "Enchanted Evening", "SHM", "140");
        addCardInfoToList(boosterList, "Energy Field", "USG", "73");
        addCardInfoToList(boosterList, "Exsanguinate", "SOM", "61");
        addCardInfoToList(boosterList, "Flameshot", "PCY", "90");
        addCardInfoToList(boosterList, "Floodgate", "MIR", "67");
        addCardInfoToList(boosterList, "Font of Mythos", "CON", "136");
        addCardInfoToList(boosterList, "Ghitu War Cry", "ULG", "78");
        addCardInfoToList(boosterList, "Gilt-Leaf Palace", "LRW", "268");
        addCardInfoToList(boosterList, "Goblin Game", "PLS", "61");
        addCardInfoToList(boosterList, "Greater Gargadon", "MMA", "117");
        addCardInfoToList(boosterList, "Guided Passage", "APC", "105");
        addCardInfoToList(boosterList, "Haakon, Stromgald Scourge", "CSP", "61");
        addCardInfoToList(boosterList, "Hedron Crab", "ZEN", "47");
        addCardInfoToList(boosterList, "Helm of Awakening", "VIS", "145");
        addCardInfoToList(boosterList, "Hunter of Eyeblights", "LRW", "119");
        addCardInfoToList(boosterList, "Hurricane", "10E", "270");
        addCardInfoToList(boosterList, "Hypnotic Specter", "M10", "100");
        addCardInfoToList(boosterList, "Impending Disaster", "ULG", "82");
        addCardInfoToList(boosterList, "Jushi Apprentice", "CHK", "70");
        addCardInfoToList(boosterList, "Kaervek's Torch", "MIR", "185");
        addCardInfoToList(boosterList, "Kargan Dragonlord", "ROE", "152");
        addCardInfoToList(boosterList, "Knight of Dawn", "TMP", "26");
        addCardInfoToList(boosterList, "Knollspine Dragon", "SHM", "98");
        addCardInfoToList(boosterList, "Kor Chant", "CNS", "73");
        addCardInfoToList(boosterList, "Kruphix, God of Horizons", "JOU", "152");
        addCardInfoToList(boosterList, "Lashknife Barrier", "PLS", "9");
        addCardInfoToList(boosterList, "Lotus Petal", "TMP", "294");
        addCardInfoToList(boosterList, "Maelstrom Archangel", "CON", "115");
        addCardInfoToList(boosterList, "Magus of the Moat", "FUT", "12");
        addCardInfoToList(boosterList, "Mana Tithe", "PLC", "25");
        addCardInfoToList(boosterList, "Manamorphose", "SHM", "211");
        addCardInfoToList(boosterList, "Martyr's Bond", "CMD", "19");
        addCardInfoToList(boosterList, "Martyr's Cause", "ULG", "13");
        addCardInfoToList(boosterList, "Master Transmuter", "CON", "31");
        addCardInfoToList(boosterList, "Meddling Mage", "ARB", "8");
        addCardInfoToList(boosterList, "Mistform Shrieker", "ONS", "96");
        addCardInfoToList(boosterList, "Nemesis of Reason", "ARB", "28");
        addCardInfoToList(boosterList, "Oracle of Nectars", "SHM", "233");
        addCardInfoToList(boosterList, "Pathrazer of Ulamog", "ROE", "9");
        addCardInfoToList(boosterList, "Perish", "TMP", "147");
        addCardInfoToList(boosterList, "Pestilence", "USG", "147");
        addCardInfoToList(boosterList, "Phantasmal Dragon", "DDM", "14");
        addCardInfoToList(boosterList, "Phantom Centaur", "JUD", "127");
        addCardInfoToList(boosterList, "Phyrexian Metamorph", "NPH", "42");
        addCardInfoToList(boosterList, "Phyrexian Soulgorger", "CSP", "141");
        addCardInfoToList(boosterList, "Purphoros, God of the Forge", "THS", "135");
        addCardInfoToList(boosterList, "Questing Phelddagrif", "PLS", "119");
        addCardInfoToList(boosterList, "Rage Reflection", "SHM", "104");
        addCardInfoToList(boosterList, "Recoup", "DDK", "63");
        addCardInfoToList(boosterList, "Release the Ants", "MOR", "98");
        addCardInfoToList(boosterList, "Rhys the Redeemed", "SHM", "237");
        addCardInfoToList(boosterList, "Rhystic Study", "PCY", "45");
        addCardInfoToList(boosterList, "Rishadan Footpad", "MMQ", "94");
        addCardInfoToList(boosterList, "Rith, the Awakener", "DDE", "48");
        addCardInfoToList(boosterList, "River Boa", "ZEN", "180");
        addCardInfoToList(boosterList, "Sadistic Hypnotist", "ODY", "159");
        addCardInfoToList(boosterList, "Sakashima the Impostor", "SOK", "53");
        addCardInfoToList(boosterList, "Sapphire Charm", "MIR", "89");
        addCardInfoToList(boosterList, "Shrouded Lore", "PLC", "91");
        addCardInfoToList(boosterList, "Soothsaying", "MMQ", "104");
        addCardInfoToList(boosterList, "Sorin Markov", "ZEN", "111");
        addCardInfoToList(boosterList, "Squirrel Wrangler", "PCY", "127");
        addCardInfoToList(boosterList, "Thieving Magpie", "UDS", "49");
        addCardInfoToList(boosterList, "Thrun, the Last Troll", "MBS", "92");
        addCardInfoToList(boosterList, "Time Sieve", "ARB", "31");
        addCardInfoToList(boosterList, "Timely Reinforcements", "M12", "40");
        addCardInfoToList(boosterList, "Tinker", "ULG", "45");
        addCardInfoToList(boosterList, "Tower of Eons", "MRD", "266");
        addCardInfoToList(boosterList, "Toxin Sliver", "LGN", "84");
        addCardInfoToList(boosterList, "Triumph of the Hordes", "NPH", "123");
        addCardInfoToList(boosterList, "Umbral Mantle", "SHM", "267");
        addCardInfoToList(boosterList, "Viashino Sandstalker", "VIS", "100");
        addCardInfoToList(boosterList, "Violent Ultimatum", "ALA", "206");
        addCardInfoToList(boosterList, "Volunteer Reserves", "WTH", "29");
        addCardInfoToList(boosterList, "Wargate", "ARB", "129");
        addCardInfoToList(boosterList, "Weathered Wayfarer", "ONS", "59");
        addCardInfoToList(boosterList, "Wild Nacatl", "ALA", "152");
        addCardInfoToList(boosterList, "Yavimaya's Embrace", "APC", "127");
    }
private void addSlot14rare(List<CardInfo> boosterList) {
addCardInfoToList(boosterList, "Adorned Pouncer", "HOU", "2");
addCardInfoToList(boosterList, "Aetherflux Reservoir", "KLD", "192");
addCardInfoToList(boosterList, "Akroan Horse", "C16", "241");
addCardInfoToList(boosterList, "Alesha, Who Smiles at Death", "FRF", "90");
addCardInfoToList(boosterList, "Alhammarret's Archive", "ORI", "221");
addCardInfoToList(boosterList, "All Is Dust", "MM2", "1");
addCardInfoToList(boosterList, "Aminatou's Augury", "C18", "6");
addCardInfoToList(boosterList, "Angel of the Dire Hour", "C14", "1");
addCardInfoToList(boosterList, "Anger of the Gods", "IMA", "116");
addCardInfoToList(boosterList, "Animar, Soul of Elements", "A25", "196");
addCardInfoToList(boosterList, "Approach of the Second Sun", "AKH", "4");
addCardInfoToList(boosterList, "Arch of Orazca", "RIX", "185");
addCardInfoToList(boosterList, "Basilisk Collar", "MM3", "216");
addCardInfoToList(boosterList, "Beacon of Immortality", "E02", "1");
addCardInfoToList(boosterList, "Beastmaster Ascension", "CMA", "92");
addCardInfoToList(boosterList, "Birds of Paradise", "CN2", "176");
addCardInfoToList(boosterList, "Black Market", "C17", "98");
addCardInfoToList(boosterList, "Boompile", "C16", "52");
addCardInfoToList(boosterList, "Boros Reckoner", "GK1", "85");
addCardInfoToList(boosterList, "Caged Sun", "CM2", "178");
addCardInfoToList(boosterList, "Cauldron of Souls", "CM2", "180");
addCardInfoToList(boosterList, "Champion of the Parish", "DDQ", "4");
addCardInfoToList(boosterList, "Chaos Warp", "C17", "131");
addCardInfoToList(boosterList, "Chasm Skulker", "M15", "46");
addCardInfoToList(boosterList, "Chromatic Lantern", "GRN", "233");
addCardInfoToList(boosterList, "Coat of Arms", "DDS", "58");
addCardInfoToList(boosterList, "Collective Brutality", "EMN", "85");
addCardInfoToList(boosterList, "Commit // Memory", "AKH", "211");
addCardInfoToList(boosterList, "Courser of Kruphix", "A25", "164");
addCardInfoToList(boosterList, "Coveted Jewel", "C18", "54");
addCardInfoToList(boosterList, "Daretti, Scrap Savant", "C14", "33");
addCardInfoToList(boosterList, "Deadly Tempest", "C15", "19");
addCardInfoToList(boosterList, "Debtors' Knell", "GK2", "39");
addCardInfoToList(boosterList, "Decree of Justice", "DDO", "7");
addCardInfoToList(boosterList, "Deepglow Skate", "CM2", "39");
addCardInfoToList(boosterList, "Desolation Twin", "BFZ", "6");
addCardInfoToList(boosterList, "Dictate of Heliod", "DDO", "8");
addCardInfoToList(boosterList, "Djinn of Wishes", "C18", "87");
addCardInfoToList(boosterList, "Dragonlord Ojutai", "DTK", "219");
addCardInfoToList(boosterList, "Drana, Kalastria Bloodchief", "C17", "112");
addCardInfoToList(boosterList, "Eldrazi Monument", "CMA", "216");
addCardInfoToList(boosterList, "Eldritch Evolution", "EMN", "155");
addCardInfoToList(boosterList, "Elesh Norn, Grand Cenobite", "IMA", "18");
addCardInfoToList(boosterList, "Evra, Halcyon Witness", "DOM", "16");
addCardInfoToList(boosterList, "Expropriate", "CN2", "30");
addCardInfoToList(boosterList, "Fblthp, the Lost", "WAR", "50");
addCardInfoToList(boosterList, "Felidar Sovereign", "BFZ", "26");
addCardInfoToList(boosterList, "Gideon Jura", "E01", "10");
addCardInfoToList(boosterList, "Goblin Charbelcher", "DDT", "57");
addCardInfoToList(boosterList, "Goblin Piledriver", "ORI", "151");
addCardInfoToList(boosterList, "Gonti, Lord of Luxury", "KLD", "84");
addCardInfoToList(boosterList, "Grasp of Fate", "C15", "3");
addCardInfoToList(boosterList, "Grave Titan", "C14", "145");
addCardInfoToList(boosterList, "Gravecrawler", "DDQ", "59");
addCardInfoToList(boosterList, "Greenbelt Rampager", "AER", "107");
addCardInfoToList(boosterList, "Hornet Nest", "M15", "177");
addCardInfoToList(boosterList, "Kiki-Jiki, Mirror Breaker", "MM2", "121");
addCardInfoToList(boosterList, "Kolaghan's Command", "DTK", "224");
addCardInfoToList(boosterList, "Krenko, Mob Boss", "DDT", "52");
addCardInfoToList(boosterList, "Liliana, Death's Majesty", "AKH", "97");
addCardInfoToList(boosterList, "Living Death", "A25", "96");
addCardInfoToList(boosterList, "Mana Crypt", "EMA", "225");
addCardInfoToList(boosterList, "Meandering Towershell", "KTK", "141");
addCardInfoToList(boosterList, "Memory Erosion", "CM2", "45");
addCardInfoToList(boosterList, "Meren of Clan Nel Toth", "C15", "49");
addCardInfoToList(boosterList, "Mimic Vat", "C19", "219");
addCardInfoToList(boosterList, "Mind Shatter", "MM3", "77");
addCardInfoToList(boosterList, "Mind Spring", "DDT", "14");
addCardInfoToList(boosterList, "Mirran Crusader", "MM2", "25");
addCardInfoToList(boosterList, "Mirror Entity", "CMA", "16");
addCardInfoToList(boosterList, "Misdirection", "DDT", "15");
addCardInfoToList(boosterList, "Mizzix's Mastery", "C15", "29");
addCardInfoToList(boosterList, "Mycoloth", "CMA", "129");
addCardInfoToList(boosterList, "Mystic Confluence", "BBD", "122");
addCardInfoToList(boosterList, "Nighthowler", "C15", "129");
addCardInfoToList(boosterList, "Nin, the Pain Artist", "C17", "183");
addCardInfoToList(boosterList, "Nissa, Voice of Zendikar", "OGW", "138");
addCardInfoToList(boosterList, "Odric, Lunarch Marshal", "SOI", "31");
addCardInfoToList(boosterList, "Phyrexian Arena", "CN2", "144");
addCardInfoToList(boosterList, "Phyrexian Plaguelord", "CMA", "61");
addCardInfoToList(boosterList, "Precursor Golem", "MM2", "225");
addCardInfoToList(boosterList, "Preyseizer Dragon", "PCA", "50");
addCardInfoToList(boosterList, "Queen Marchesa", "CN2", "78");
addCardInfoToList(boosterList, "Reality Scramble", "C18", "25");
addCardInfoToList(boosterList, "Recruiter of the Guard", "CN2", "22");
addCardInfoToList(boosterList, "Release the Gremlins", "AER", "96");
addCardInfoToList(boosterList, "Revel in Riches", "XLN", "117");
addCardInfoToList(boosterList, "Rune-Scarred Demon", "IMA", "106");
addCardInfoToList(boosterList, "Savage Knuckleblade", "KTK", "197");
addCardInfoToList(boosterList, "Selvala, Heart of the Wilds", "CN2", "70");
addCardInfoToList(boosterList, "Serendib Efreet", "EMA", "70");
addCardInfoToList(boosterList, "Sewer Nemesis", "CM2", "75");
addCardInfoToList(boosterList, "Shamanic Revelation", "FRF", "138");
addCardInfoToList(boosterList, "Sliver Hivelord", "M15", "211");
addCardInfoToList(boosterList, "Solemn Simulacrum", "DDU", "62");
addCardInfoToList(boosterList, "Spawning Grounds", "C18", "163");
addCardInfoToList(boosterList, "Star of Extinction", "XLN", "161");
addCardInfoToList(boosterList, "Steamflogger Boss", "UST", "93");
addCardInfoToList(boosterList, "Stunt Double", "CN2", "38");
addCardInfoToList(boosterList, "Sudden Demise", "E01", "59");
addCardInfoToList(boosterList, "Supreme Verdict", "IMA", "210");
addCardInfoToList(boosterList, "Sword of the Animist", "E01", "89");
addCardInfoToList(boosterList, "Talrand, Sky Summoner", "DDS", "11");
addCardInfoToList(boosterList, "Taurean Mauler", "CM2", "122");
addCardInfoToList(boosterList, "Teferi, Temporal Archmage", "C14", "19");
addCardInfoToList(boosterList, "Teferi's Protection", "C17", "8");
addCardInfoToList(boosterList, "Temporal Mastery", "MM3", "54");
addCardInfoToList(boosterList, "Tempt with Discovery", "C16", "170");
addCardInfoToList(boosterList, "Thalia's Lancers", "EMN", "47");
addCardInfoToList(boosterList, "The Gitrog Monster", "SOI", "245");
addCardInfoToList(boosterList, "The Mirari Conjecture", "DOM", "57");
addCardInfoToList(boosterList, "Tireless Tracker", "SOI", "233");
addCardInfoToList(boosterList, "Torment of Hailfire", "HOU", "77");
addCardInfoToList(boosterList, "Trading Post", "CM2", "225");
addCardInfoToList(boosterList, "Two-Headed Giant", "DOM", "147");
addCardInfoToList(boosterList, "Urza's Rage", "C15", "169");
addCardInfoToList(boosterList, "Vigor", "BBD", "215");
addCardInfoToList(boosterList, "Wheel of Fate", "C16", "138");
addCardInfoToList(boosterList, "Whelming Wave", "DDO", "44");
addCardInfoToList(boosterList, "Whir of Invention", "AER", "49");
addCardInfoToList(boosterList, "Yuriko, the Tiger's Shadow", "C18", "52");
}
/**
 * Fills the given list with the card pool for booster slot 15 (foil slot):
 * one entry per printable card, identified by card name, set code and
 * collector number.
 *
 * @param boosterList the list to append the slot-15 card pool to
 */
private void addSlot15foil(List<CardInfo> boosterList) {
    addCardInfoToList(boosterList, "Alchemist's Refuge", "AVR", "225");
    addCardInfoToList(boosterList, "Allosaurus Rider", "CSP", "101");
    addCardInfoToList(boosterList, "Amulet of Vigor", "WWK", "121");
    addCardInfoToList(boosterList, "Archetype of Endurance", "BNG", "116");
    addCardInfoToList(boosterList, "Aurelia's Fury", "GTC", "144");
    addCardInfoToList(boosterList, "Balduvian Rage", "CSP", "76");
    addCardInfoToList(boosterList, "Balefire Liege", "EVE", "132");
    addCardInfoToList(boosterList, "Blasting Station", "5DN", "107");
    addCardInfoToList(boosterList, "Blighted Agent", "NPH", "29");
    addCardInfoToList(boosterList, "Boreal Druid", "CSP", "105");
    addCardInfoToList(boosterList, "Boundless Realms", "M13", "162");
    addCardInfoToList(boosterList, "Braid of Fire", "CSP", "78");
    addCardInfoToList(boosterList, "Bramblewood Paragon", "MOR", "115");
    addCardInfoToList(boosterList, "Bringer of the Black Dawn", "5DN", "43");
    addCardInfoToList(boosterList, "Burning Inquiry", "M10", "128");
    addCardInfoToList(boosterList, "Celestial Dawn", "TSB", "3");
    addCardInfoToList(boosterList, "Celestial Kirin", "SOK", "3");
    addCardInfoToList(boosterList, "Changeling Hero", "LRW", "9");
    addCardInfoToList(boosterList, "Chimney Imp", "MRD", "59");
    addCardInfoToList(boosterList, "Codex Shredder", "RTR", "228");
    addCardInfoToList(boosterList, "Conspiracy", "TSB", "39");
    addCardInfoToList(boosterList, "Council Guardian", "CNS", "15");
    addCardInfoToList(boosterList, "Delay", "FUT", "35");
    addCardInfoToList(boosterList, "Drogskol Captain", "DKA", "136");
    addCardInfoToList(boosterList, "Echoing Decay", "DST", "41");
    addCardInfoToList(boosterList, "Eidolon of Rhetoric", "JOU", "10");
    addCardInfoToList(boosterList, "Fatespinner", "MRD", "36");
    addCardInfoToList(boosterList, "Fiery Gambit", "MRD", "90");
    addCardInfoToList(boosterList, "Flamekin Harbinger", "LRW", "167");
    addCardInfoToList(boosterList, "Form of the Dragon", "9ED", "187");
    addCardInfoToList(boosterList, "Frozen Aether", "PLC", "54");
    addCardInfoToList(boosterList, "Funeral Charm", "TSB", "44");
    addCardInfoToList(boosterList, "Fungusaur", "8ED", "250");
    addCardInfoToList(boosterList, "Game-Trail Changeling", "MOR", "123");
    addCardInfoToList(boosterList, "Geth's Grimoire", "DST", "123");
    addCardInfoToList(boosterList, "Gilder Bairn", "EVE", "152");
    addCardInfoToList(boosterList, "Gleeful Sabotage", "SHM", "116");
    addCardInfoToList(boosterList, "Glittering Wish", "FUT", "156");
    addCardInfoToList(boosterList, "Goblin Bushwhacker", "ZEN", "125");
    addCardInfoToList(boosterList, "Grand Architect", "SOM", "33");
    addCardInfoToList(boosterList, "Greater Mossdog", "MMA", "146");
    addCardInfoToList(boosterList, "Guerrilla Tactics", "10E", "211");
    addCardInfoToList(boosterList, "Harmonic Sliver", "TSP", "240");
    addCardInfoToList(boosterList, "Helix Pinnacle", "EVE", "68");
    addCardInfoToList(boosterList, "Herald of Leshrac", "CSP", "62");
    addCardInfoToList(boosterList, "Hornet Sting", "M11", "181");
    addCardInfoToList(boosterList, "Intruder Alarm", "8ED", "86");
    addCardInfoToList(boosterList, "Iron Myr", "SOM", "168");
    addCardInfoToList(boosterList, "Isamaru, Hound of Konda", "CHK", "19");
    addCardInfoToList(boosterList, "Karrthus, Tyrant of Jund", "ARB", "117");
    addCardInfoToList(boosterList, "Knowledge Pool", "MBS", "111");
    addCardInfoToList(boosterList, "Kulrath Knight", "SHM", "190");
    addCardInfoToList(boosterList, "Lantern of Insight", "5DN", "135");
    addCardInfoToList(boosterList, "Lapse of Certainty", "CON", "9");
    addCardInfoToList(boosterList, "Leveler", "MRD", "195");
    addCardInfoToList(boosterList, "Lich's Mirror", "ALA", "210");
    addCardInfoToList(boosterList, "Lightning Storm", "CSP", "89");
    addCardInfoToList(boosterList, "Lumithread Field", "FUT", "25");
    addCardInfoToList(boosterList, "Maelstrom Nexus", "ARB", "130");
    addCardInfoToList(boosterList, "Magewright's Stone", "DIS", "162");
    addCardInfoToList(boosterList, "Manaweft Sliver", "M14", "184");
    addCardInfoToList(boosterList, "Maro", "8ED", "264");
    addCardInfoToList(boosterList, "Marrow-Gnawer", "CHK", "124");
    addCardInfoToList(boosterList, "Memnite", "SOM", "174");
    addCardInfoToList(boosterList, "Minamo, School at Water's Edge", "CHK", "279");
    addCardInfoToList(boosterList, "Mind Funeral", "MMA", "181");
    addCardInfoToList(boosterList, "Mindslaver", "MRD", "206");
    addCardInfoToList(boosterList, "Mirrodin's Core", "DST", "165");
    addCardInfoToList(boosterList, "Misthollow Griffin", "AVR", "68");
    addCardInfoToList(boosterList, "Myojin of Life's Web", "CHK", "229");
    addCardInfoToList(boosterList, "Nezumi Shortfang", "CHK", "131");
    addCardInfoToList(boosterList, "Noggle Bandit", "EVE", "106");
    addCardInfoToList(boosterList, "Norin the Wary", "TSP", "171");
    addCardInfoToList(boosterList, "Norn's Annex", "NPH", "17");
    addCardInfoToList(boosterList, "Not of This World", "ROE", "8");
    addCardInfoToList(boosterList, "Ogre Gatecrasher", "DIS", "67");
    addCardInfoToList(boosterList, "One with Nothing", "SOK", "84");
    addCardInfoToList(boosterList, "Panglacial Wurm", "CSP", "116");
    addCardInfoToList(boosterList, "Paradox Haze", "TSP", "71");
    addCardInfoToList(boosterList, "Patron of the Moon", "BOK", "45");
    addCardInfoToList(boosterList, "Pili-Pala", "SHM", "258");
    addCardInfoToList(boosterList, "Proclamation of Rebirth", "DIS", "15");
    addCardInfoToList(boosterList, "Puca's Mischief", "SHM", "47");
    addCardInfoToList(boosterList, "Pull from Eternity", "TSP", "35");
    addCardInfoToList(boosterList, "Pyretic Ritual", "M11", "153");
    addCardInfoToList(boosterList, "Ravenous Trap", "ZEN", "109");
    addCardInfoToList(boosterList, "Reaper King", "SHM", "260");
    addCardInfoToList(boosterList, "Reki, the History of Kamigawa", "SOK", "142");
    addCardInfoToList(boosterList, "Rescue from the Underworld", "THS", "102");
    addCardInfoToList(boosterList, "Rhox", "10E", "291");
    addCardInfoToList(boosterList, "Rune-Tail, Kitsune Ascendant", "SOK", "27");
    addCardInfoToList(boosterList, "Sakura-Tribe Scout", "SOK", "144");
    addCardInfoToList(boosterList, "Sarkhan the Mad", "ROE", "214");
    addCardInfoToList(boosterList, "Scourge of the Throne", "CNS", "35");
    addCardInfoToList(boosterList, "Scryb Ranger", "TSP", "215");
    addCardInfoToList(boosterList, "Sen Triplets", "ARB", "109");
    addCardInfoToList(boosterList, "Sheltering Ancient", "CSP", "121");
    addCardInfoToList(boosterList, "Shizo, Death's Storehouse", "CHK", "283");
    addCardInfoToList(boosterList, "Sinew Sliver", "PLC", "30");
    addCardInfoToList(boosterList, "Sosuke, Son of Seshiro", "CHK", "244");
    addCardInfoToList(boosterList, "Soul's Attendant", "ROE", "44");
    addCardInfoToList(boosterList, "Spelltithe Enforcer", "GPT", "18");
    addCardInfoToList(boosterList, "Spellweaver Volute", "FUT", "59");
    addCardInfoToList(boosterList, "Spike Feeder", "TSB", "84");
    addCardInfoToList(boosterList, "Springjack Shepherd", "EVE", "15");
    addCardInfoToList(boosterList, "Stalking Stones", "MRD", "284");
    addCardInfoToList(boosterList, "Stigma Lasher", "EVE", "62");
    addCardInfoToList(boosterList, "Storm Crow", "9ED", "100");
    addCardInfoToList(boosterList, "Sundial of the Infinite", "M12", "218");
    addCardInfoToList(boosterList, "Teferi's Puzzle Box", "8ED", "316");
    addCardInfoToList(boosterList, "Trailblazer's Boots", "ZEN", "208");
    addCardInfoToList(boosterList, "Treasonous Ogre", "CNS", "36");
    addCardInfoToList(boosterList, "Triskelion", "M11", "218");
    addCardInfoToList(boosterList, "Undead Warchief", "TSB", "52");
    addCardInfoToList(boosterList, "Viscera Seer", "M11", "120");
    addCardInfoToList(boosterList, "Wall of Shards", "CSP", "23");
    addCardInfoToList(boosterList, "Wear // Tear", "DGM", "135");
    addCardInfoToList(boosterList, "White Knight", "M10", "41");
    addCardInfoToList(boosterList, "Witchbane Orb", "ISD", "236");
    addCardInfoToList(boosterList, "Yore-Tiller Nephilim", "GPT", "140");
    addCardInfoToList(boosterList, "Zur's Weirding", "8ED", "116");
}
}
| magefree/mage | Mage.Sets/src/mage/sets/MysteryBooster.java |
1,481 | package cn.hutool.core.lang.id;
import cn.hutool.core.util.RandomUtil;
import java.security.SecureRandom;
import java.util.Random;
/**
 * NanoId: a small, secure, URL-friendly unique string ID generator.
 *
 * <ul>
 * <li>Secure: uses a cryptographically strong random API by default</li>
 * <li>Small: tiny implementation with no dependencies</li>
 * <li>Compact: uses a larger symbol set than UUID (A-Za-z0-9_-)</li>
 * </ul>
 *
 * <p>
 * The logic of this implementation is based on the JavaScript NanoId
 * implementation, see: https://github.com/ai/nanoid
 *
 * @author David Klebanoff
 */
public class NanoId {

    /**
     * Default random generator; a {@link SecureRandom} to guarantee
     * cryptographic strength.
     */
    private static final SecureRandom DEFAULT_NUMBER_GENERATOR = RandomUtil.getSecureRandom();

    /**
     * Default alphabet: the URL-safe Base64 character set.
     */
    private static final char[] DEFAULT_ALPHABET =
            "_-0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ".toCharArray();

    /**
     * Default ID length.
     */
    public static final int DEFAULT_SIZE = 21;

    /**
     * Generates a NanoId string of the default length {@link #DEFAULT_SIZE},
     * using the default cryptographically strong random generator.
     *
     * @return a pseudo-random NanoId string
     */
    public static String randomNanoId() {
        return randomNanoId(DEFAULT_SIZE);
    }

    /**
     * Generates a NanoId string of the given length using the default
     * random generator and alphabet.
     *
     * @param size ID length
     * @return a pseudo-random NanoId string
     */
    public static String randomNanoId(int size) {
        return randomNanoId(null, null, size);
    }

    /**
     * Generates a NanoId string.
     *
     * @param random   random generator, {@code null} for the default secure one
     * @param alphabet symbol table, {@code null} for the default URL-safe set;
     *                 must contain between 1 and 255 symbols
     * @param size     ID length, must be positive
     * @return a pseudo-random NanoId string
     * @throws IllegalArgumentException if the alphabet size or the requested
     *                                  size is out of range
     */
    public static String randomNanoId(Random random, char[] alphabet, int size) {
        if (random == null) {
            random = DEFAULT_NUMBER_GENERATOR;
        }
        if (alphabet == null) {
            alphabet = DEFAULT_ALPHABET;
        }
        if (alphabet.length == 0 || alphabet.length >= 256) {
            throw new IllegalArgumentException("Alphabet must contain between 1 and 255 symbols.");
        }
        if (size <= 0) {
            throw new IllegalArgumentException("Size must be greater than zero.");
        }

        // Smallest (2^k - 1) bit mask that covers every alphabet index;
        // random bytes masked with it are rejected when they fall outside
        // the alphabet, keeping the symbol distribution uniform.
        final int mask = (2 << (int) Math.floor(Math.log(alphabet.length - 1) / Math.log(2))) - 1;
        // Expected number of random bytes needed per fill (with some slack),
        // per the reference JavaScript implementation.
        final int step = (int) Math.ceil(1.6 * mask * size / alphabet.length);

        // Presized to the final length to avoid intermediate reallocation.
        final StringBuilder idBuilder = new StringBuilder(size);
        while (true) {
            final byte[] bytes = new byte[step];
            random.nextBytes(bytes);
            for (int i = 0; i < step; i++) {
                final int alphabetIndex = bytes[i] & mask;
                if (alphabetIndex < alphabet.length) {
                    idBuilder.append(alphabet[alphabetIndex]);
                    if (idBuilder.length() == size) {
                        return idBuilder.toString();
                    }
                }
            }
        }
    }
}
| dromara/hutool | hutool-core/src/main/java/cn/hutool/core/lang/id/NanoId.java |
1,482 | package cn.hutool.core.util;
/**
 * Radix conversion utility supporting arbitrary custom radix tables.
 * <p>
 * Converts a decimal integer according to a caller-defined radix table.<br>
 * from: https://gitee.com/loolly/hutool/pulls/260
 * <p>
 * Typical use cases:
 * <ul>
 * <li>Generating short invitation codes from IDs without exposing the ID</li>
 * <li>Generating short links from IDs that are hard to guess</li>
 * <li>Obfuscating regular numbers via two conversions with different tables</li>
 * <li>....</li>
 * </ul>
 *
 * @author [email protected]
 * @since 5.5.8
 */
public class RadixUtil {

    /**
     * Base-34 table without the easily confused characters 'I' and 'O'.
     * Callers needing padding may randomly insert 'I'/'O' characters.
     */
    public final static String RADIXS_34 = "0123456789ABCDEFGHJKLMNPQRSTUVWXYZ";

    /**
     * Shuffled variant of the base-34 table.
     */
    public final static String RADIXS_SHUFFLE_34 = "H3UM16TDFPSBZJ90CW28QYRE45AXKNGV7L";

    /**
     * Base-59 table without the easily confused characters 'I', 'O' and 'l'.
     */
    public final static String RADIXS_59 = "0123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ";

    /**
     * Shuffled variant of the base-59 table.
     */
    public final static String RADIXS_SHUFFLE_59 = "vh9wGkfK8YmqbsoENP3764SeCX0dVzrgy1HRtpnTaLjJW2xQiZAcBMUFDu5";

    /**
     * Converts an int value to a custom radix string; the radix equals the
     * table length.
     * <ul>
     * <li>encode("AB", 10): base 2 with A=0, B=1; binary 1010 gives "BABA"</li>
     * <li>encode("VIP", 21): base 3 with V=0, I=1, P=2; ternary 210 gives "PIV"</li>
     * </ul>
     *
     * @param radixs custom radix table; characters must be unique, otherwise
     *               the value cannot be decoded back
     * @param num    value to convert; negatives are encoded as their unsigned
     *               32-bit equivalent and restored by {@link #decodeToInt}
     * @return custom radix string
     */
    public static String encode(String radixs, int num) {
        // Map a negative int to its unsigned 32-bit value (2^32 + num)
        long tmpNum = (num >= 0 ? num : (0x100000000L - (~num + 1)));
        return encode(radixs, tmpNum, 32);
    }

    /**
     * Converts a non-negative long value to a custom radix string.
     *
     * @param radixs custom radix table; characters must be unique
     * @param num    value to convert, must be non-negative
     * @return custom radix string
     * @throws IllegalArgumentException if {@code num} is negative
     */
    public static String encode(String radixs, long num) {
        if (num < 0) {
            throw new IllegalArgumentException("暂不支持负数!");
        }
        return encode(radixs, num, 64);
    }

    /**
     * Decodes a custom radix string back to an int.
     *
     * @param radixs    custom radix table, must match the one used to encode
     * @param encodeStr string to decode
     * @return int value; unsigned 32-bit values produced from negative ints
     *         are restored by the narrowing cast
     */
    public static int decodeToInt(String radixs, String encodeStr) {
        // Narrowing to 32 bits restores negatives encoded by encode(String, int)
        return (int) decode(radixs, encodeStr);
    }

    /**
     * Decodes a custom radix string back to a long.
     *
     * @param radixs    custom radix table, must match the one used to encode
     * @param encodeStr string to decode
     * @return long value
     * @throws IllegalArgumentException if {@code encodeStr} contains a
     *                                  character missing from {@code radixs}
     */
    public static long decode(String radixs, String encodeStr) {
        // The radix is the table length
        final int radix = radixs.length();
        long result = 0L;
        for (char c : encodeStr.toCharArray()) {
            final int digit = radixs.indexOf(c);
            if (digit < 0) {
                // Previously a stray character silently contributed -1 and
                // produced a wrong result; fail fast instead.
                throw new IllegalArgumentException(
                        "Invalid character '" + c + "' for radix table: " + radixs);
            }
            result = result * radix + digit;
        }
        return result;
    }

    // -------------------------------------------------------------------------------- Private methods

    /**
     * Core encoder: converts {@code num} to the custom radix, writing digits
     * from the end of a fixed-size buffer to avoid reversing a StringBuilder.
     *
     * @param radixs    custom radix table, at least two characters
     * @param num       non-negative value to convert
     * @param maxLength worst-case digit count (32 for int, 64 for long input)
     * @return custom radix string
     * @throws IllegalArgumentException if the table has fewer than two characters
     */
    private static String encode(String radixs, long num, int maxLength) {
        if (radixs.length() < 2) {
            throw new IllegalArgumentException("自定义进制最少两个字符哦!");
        }
        final int radix = radixs.length();
        long remaining = num;
        // Fill from the end; binary (the smallest radix) needs at most
        // maxLength digits.
        final char[] buf = new char[maxLength];
        int pos = buf.length;
        do {
            buf[--pos] = radixs.charAt((int) (remaining % radix));
            remaining /= radix;
        } while (remaining > 0);
        // Return only the written suffix, skipping the unused prefix
        return new String(buf, pos, buf.length - pos);
    }
}
| dromara/hutool | hutool-core/src/main/java/cn/hutool/core/util/RadixUtil.java |
1,483 | /*
* Copyright 2000-2022 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.client;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.logging.Logger;
import com.google.gwt.core.client.JavaScriptObject;
import com.google.gwt.core.client.Scheduler;
import com.google.gwt.dom.client.AnchorElement;
import com.google.gwt.dom.client.DivElement;
import com.google.gwt.dom.client.Document;
import com.google.gwt.dom.client.Element;
import com.google.gwt.dom.client.NativeEvent;
import com.google.gwt.dom.client.Node;
import com.google.gwt.dom.client.NodeList;
import com.google.gwt.dom.client.Style;
import com.google.gwt.dom.client.Style.Unit;
import com.google.gwt.dom.client.Touch;
import com.google.gwt.event.dom.client.KeyEvent;
import com.google.gwt.regexp.shared.MatchResult;
import com.google.gwt.regexp.shared.RegExp;
import com.google.gwt.user.client.DOM;
import com.google.gwt.user.client.Event;
import com.google.gwt.user.client.EventListener;
import com.google.gwt.user.client.Window;
import com.google.gwt.user.client.ui.RootPanel;
import com.google.gwt.user.client.ui.TextBox;
import com.google.gwt.user.client.ui.Widget;
import com.vaadin.shared.ui.ErrorLevel;
import com.vaadin.shared.util.SharedUtil;
/**
* Utility methods which are related to client side code only.
*/
public class WidgetUtil {
/**
 * Simple object to store another object.
 *
 * @param <T>
 *            the object type to store
 * @since 8.4
 */
public static class Reference<T> {

    // The currently stored object; null until set(T) is called
    T reference = null;

    /**
     * Gets the current object.
     *
     * @return the stored object, or {@code null} if none has been set
     */
    public T get() {
        return reference;
    }

    /**
     * Sets the current object.
     *
     * @param reference
     *            the object to store
     */
    public void set(T reference) {
        this.reference = reference;
    }
}
/**
 * Helper method for debugging purposes.
 *
 * Stops execution on firefox browsers on a breakpoint.
 *
 * Only triggers the JavaScript {@code debugger} statement when
 * {@code $wnd.console} is available (i.e. a developer console exists).
 */
public static native void browserDebugger()
/*-{
    if ($wnd.console)
        debugger;
}-*/;
/**
 * Redirects the browser to the given url or refreshes the page if url is
 * null.
 *
 * The refresh uses {@code location.reload(false)}, i.e. the page may be
 * served from the browser cache rather than re-fetched from the server.
 *
 * @since 7.6
 * @param url
 *            The url to redirect to or null to refresh
 */
public static native void redirect(String url)
/*-{
    if (url) {
        $wnd.location = url;
    } else {
        $wnd.location.reload(false);
    }
}-*/;
/**
 * Helper method for a bug fix #14041. On Mozilla browsers
 * {@code getKeyCode} returns 0 for the space bar (space is treated as a
 * character), so the character code is used as a fallback when the key
 * code is 0.
 *
 * @param event
 *            the key event to read the code from
 * @return the native key code, or the character code if the key code is 0
 * @since 7.2.4
 */
public static int getKeyCode(KeyEvent<?> event) {
    final int nativeKeyCode = event.getNativeEvent().getKeyCode();
    return nativeKeyCode != 0 ? nativeKeyCode
            : event.getNativeEvent().getCharCode();
}
/**
 * Returns the topmost element of from given coordinates.
 *
 * If the hit node is a text node (nodeType == 3), its parent element is
 * returned instead.
 *
 * TODO fix crossplat issues clientX vs pageX. See quircksmode. Not critical
 * for vaadin as we scroll div istead of page.
 *
 * @param clientX
 *            the x coordinate, relative to the viewport
 * @param clientY
 *            the y coordinate, relative to the viewport
 * @return the element at given coordinates
 */
public static native Element getElementFromPoint(int clientX, int clientY)
/*-{
    var el = $wnd.document.elementFromPoint(clientX, clientY);
    if (el != null && el.nodeType == 3) {
        el = el.parentNode;
    }
    return el;
}-*/;
/**
 * Parses a relative (percentage) CSS size string such as {@code "50%"}.
 *
 * @param size
 *            the size string to parse, may be {@code null}
 * @return the numeric percentage value, or -1 if the string is
 *         {@code null}, does not end with {@code '%'}, or the numeric part
 *         cannot be parsed
 */
public static float parseRelativeSize(String size) {
    if (size == null || !size.endsWith("%")) {
        return -1;
    }

    try {
        return Float.parseFloat(size.substring(0, size.length() - 1));
    } catch (NumberFormatException e) {
        // Only NumberFormatException can occur here: the null check and
        // the "%" suffix guarantee the substring call is safe, so the
        // previous broad catch (Exception) was unnecessarily wide.
        getLogger().warning("Unable to parse relative size");
        return -1;
    }
}
// Shared detached <div> used by escapeHTML(String) to let the browser
// perform the text -> HTML entity conversion
private static final Element ESCAPE_HTML_HELPER = DOM.createDiv();

/**
 * Converts html entities to text.
 *
 * Works by setting the string as the text content of a helper element and
 * reading back its innerHTML, so the browser performs the escaping.
 *
 * @param html
 *            the text to escape
 * @return escaped string presentation of given html
 */
public static String escapeHTML(String html) {
    DOM.setInnerText(ESCAPE_HTML_HELPER, html);
    String escapedText = DOM.getInnerHTML(ESCAPE_HTML_HELPER);
    return escapedText;
}
/**
* Escapes the string so it is safe to write inside an HTML attribute.
*
* @param attribute
* The string to escape
* @return An escaped version of <literal>attribute</literal>.
*/
public static String escapeAttribute(String attribute) {
if (attribute == null) {
return "";
}
attribute = attribute.replace("\"", """);
attribute = attribute.replace("'", "'");
attribute = attribute.replace(">", ">");
attribute = attribute.replace("<", "<");
attribute = attribute.replace("&", "&");
return attribute;
}
/**
 * Clones given element as in JavaScript.
 *
 * Deprecate this if there appears similar method into GWT someday.
 *
 * @param element
 *            the element to clone
 * @param deep
 *            clone child tree also
 * @return the cloned element (detached from the document)
 */
public static native Element cloneNode(Element element, boolean deep)
/*-{
    return element.cloneNode(deep);
}-*/;
/**
 * Measures the combined horizontal padding and border of an element by
 * temporarily setting an explicit pixel width and comparing it against the
 * resulting offset width. The element's original width style is restored
 * before returning.
 *
 * @param element
 *            the element to measure; its width style is briefly mutated
 * @param paddingGuess
 *            initial estimate of the horizontal padding + border in pixels
 * @return the measured horizontal padding + border in pixels
 */
public static int measureHorizontalPaddingAndBorder(Element element,
        int paddingGuess) {
    String originalWidth = DOM.getStyleAttribute(element, "width");

    int originalOffsetWidth = element.getOffsetWidth();
    int widthGuess = (originalOffsetWidth - paddingGuess);
    // Keep the probe width at least 1px so the measurement is meaningful
    if (widthGuess < 1) {
        widthGuess = 1;
    }
    element.getStyle().setWidth(widthGuess, Unit.PX);
    // offsetWidth includes padding and border; the difference from the
    // explicitly set content width is therefore the padding + border
    int padding = element.getOffsetWidth() - widthGuess;

    element.getStyle().setProperty("width", originalWidth);

    return padding;
}
/**
 * Measures the combined vertical padding and border of an element by
 * temporarily setting an explicit pixel height and comparing it against
 * the resulting offset height. The element's original height style is
 * restored before returning.
 *
 * @param element
 *            the element to measure; its height style is briefly mutated
 * @param paddingGuess
 *            initial estimate of the vertical padding + border in pixels
 * @return the measured vertical padding + border in pixels
 */
public static int measureVerticalPaddingAndBorder(Element element,
        int paddingGuess) {
    String originalHeight = DOM.getStyleAttribute(element, "height");
    int originalOffsetHeight = element.getOffsetHeight();
    // Despite the name, this is a height guess (mirrors the horizontal
    // variant of this method)
    int widthGuess = (originalOffsetHeight - paddingGuess);
    if (widthGuess < 1) {
        widthGuess = 1;
    }
    element.getStyle().setHeight(widthGuess, Unit.PX);
    // offsetHeight includes padding and border; the difference from the
    // explicitly set content height is therefore the padding + border
    int padding = element.getOffsetHeight() - widthGuess;

    element.getStyle().setProperty("height", originalHeight);
    return padding;
}
/**
 * Measures the total horizontal border width (left + right) of an element
 * as the difference between its offset width and client width.
 *
 * On IE the element is temporarily forced to explicit pixel dimensions
 * before measuring, and the original width/height styles are restored
 * afterwards.
 *
 * @param element
 *            the element to measure
 * @return the horizontal border width in pixels (never negative)
 */
public static int measureHorizontalBorder(Element element) {
    int borders;

    if (BrowserInfo.get().isIE()) {
        String width = element.getStyle().getProperty("width");
        String height = element.getStyle().getProperty("height");

        int offsetWidth = element.getOffsetWidth();
        int offsetHeight = element.getOffsetHeight();
        // Clamp degenerate sizes before pinning them as explicit styles
        if (offsetHeight < 1) {
            offsetHeight = 1;
        }
        if (offsetWidth < 1) {
            offsetWidth = 10;
        }
        element.getStyle().setPropertyPx("height", offsetHeight);
        element.getStyle().setPropertyPx("width", offsetWidth);

        borders = element.getOffsetWidth() - element.getClientWidth();

        // Restore the original inline styles
        element.getStyle().setProperty("width", width);
        element.getStyle().setProperty("height", height);
    } else {
        borders = element.getOffsetWidth()
                - element.getPropertyInt("clientWidth");
    }
    assert borders >= 0;

    return borders;
}
/**
 * Measures the total vertical border height (top + bottom) of an element
 * as the difference between its offset height and client height.
 *
 * On IE the element is temporarily forced to explicit pixel dimensions
 * before measuring, and the original width/height styles are restored
 * afterwards.
 *
 * @param element
 *            the element to measure
 * @return the vertical border height in pixels (never negative)
 */
public static int measureVerticalBorder(Element element) {
    int borders;
    if (BrowserInfo.get().isIE()) {
        String width = element.getStyle().getProperty("width");
        String height = element.getStyle().getProperty("height");

        int offsetWidth = element.getOffsetWidth();
        int offsetHeight = element.getOffsetHeight();
        // Clamp degenerate sizes before pinning them as explicit styles
        if (offsetHeight < 1) {
            offsetHeight = 1;
        }
        if (offsetWidth < 1) {
            offsetWidth = 10;
        }
        element.getStyle().setPropertyPx("width", offsetWidth);
        element.getStyle().setPropertyPx("height", offsetHeight);

        borders = element.getOffsetHeight()
                - element.getPropertyInt("clientHeight");

        // Restore the original inline styles
        element.getStyle().setProperty("height", height);
        element.getStyle().setProperty("width", width);
    } else {
        borders = element.getOffsetHeight()
                - element.getPropertyInt("clientHeight");
    }
    assert borders >= 0;

    return borders;
}
/**
 * Measures the distance between the element's left edge and its parent's
 * left edge, using their absolute positions.
 *
 * @param element
 *            the element to measure; must have a parent element
 * @return the horizontal offset from the parent's left edge in pixels
 */
public static int measureMarginLeft(Element element) {
    final int elementLeft = element.getAbsoluteLeft();
    final int parentLeft = element.getParentElement().getAbsoluteLeft();
    return elementLeft - parentLeft;
}
/**
 * Sets the given CSS height on the widget so that the resulting offset
 * height (content + padding + border) matches the requested size.
 *
 * Pixel heights are converted and delegated to the Element-based variant;
 * other units are applied directly and the resulting offset height is used
 * for the padding/border compensation.
 *
 * @param widget
 *            the widget whose height to set
 * @param height
 *            the CSS height string; empty clears the height
 * @param paddingBorderGuess
 *            estimate of the vertical padding + border in pixels
 * @return the measured (or assumed) vertical padding + border in pixels
 */
public static int setHeightExcludingPaddingAndBorder(Widget widget,
        String height, int paddingBorderGuess) {
    if (height.isEmpty()) {
        // Clearing the height: nothing to measure, return the guess as-is
        setHeight(widget, "");
        return paddingBorderGuess;
    } else if (height.endsWith("px")) {
        int pixelHeight = Integer
                .parseInt(height.substring(0, height.length() - 2));
        return setHeightExcludingPaddingAndBorder(widget.getElement(),
                pixelHeight, paddingBorderGuess, false);
    } else {
        // Set the height in unknown units
        setHeight(widget, height);
        // Use the offsetWidth
        return setHeightExcludingPaddingAndBorder(widget.getElement(),
                widget.getOffsetHeight(), paddingBorderGuess, true);
    }
}
// Applies a raw CSS width string directly to the widget's element style
private static void setWidth(Widget widget, String width) {
    widget.getElement().getStyle().setProperty("width", width);
}

// Applies a raw CSS height string directly to the widget's element style
private static void setHeight(Widget widget, String height) {
    widget.getElement().getStyle().setProperty("height", height);
}
/**
 * Sets the given CSS width on the widget so that the resulting offset
 * width (content + padding + border) matches the requested size.
 *
 * Pixel widths are converted and delegated to the Element-based variant;
 * other units are applied directly and the resulting offset width is used
 * for the padding/border compensation.
 *
 * @param widget
 *            the widget whose width to set
 * @param width
 *            the CSS width string; empty clears the width
 * @param paddingBorderGuess
 *            estimate of the horizontal padding + border in pixels
 * @return the measured (or assumed) horizontal padding + border in pixels
 */
public static int setWidthExcludingPaddingAndBorder(Widget widget,
        String width, int paddingBorderGuess) {
    if (width.isEmpty()) {
        // Clearing the width: nothing to measure, return the guess as-is
        setWidth(widget, "");
        return paddingBorderGuess;
    } else if (width.endsWith("px")) {
        int pixelWidth = Integer
                .parseInt(width.substring(0, width.length() - 2));
        return setWidthExcludingPaddingAndBorder(widget.getElement(),
                pixelWidth, paddingBorderGuess, false);
    } else {
        // Non-pixel unit: apply directly, then measure via offsetWidth
        setWidth(widget, width);
        return setWidthExcludingPaddingAndBorder(widget.getElement(),
                widget.getOffsetWidth(), paddingBorderGuess, true);
    }
}
/**
 * Sets a pixel width on the element such that the total offset width
 * (content + padding + border) equals the requested width, measuring the
 * actual padding + border and correcting the applied width if the guess
 * was off.
 *
 * @param element
 *            the element whose width to set
 * @param requestedWidth
 *            the desired total width in pixels
 * @param horizontalPaddingBorderGuess
 *            estimate of the horizontal padding + border in pixels
 * @param requestedWidthIncludesPaddingBorder
 *            true if {@code requestedWidth} already includes padding and
 *            border
 * @return the measured horizontal padding + border in pixels
 */
public static int setWidthExcludingPaddingAndBorder(Element element,
        int requestedWidth, int horizontalPaddingBorderGuess,
        boolean requestedWidthIncludesPaddingBorder) {
    int widthGuess = requestedWidth - horizontalPaddingBorderGuess;
    if (widthGuess < 0) {
        widthGuess = 0;
    }

    element.getStyle().setWidth(widthGuess, Unit.PX);
    int captionOffsetWidth = DOM.getElementPropertyInt(element,
            "offsetWidth");

    int actualPadding = captionOffsetWidth - widthGuess;

    if (requestedWidthIncludesPaddingBorder) {
        // NOTE(review): this doubles the measured padding for the
        // offsetWidth-based path — looks intentional but worth verifying
        actualPadding += actualPadding;
    }

    if (actualPadding != horizontalPaddingBorderGuess) {
        int w = requestedWidth - actualPadding;
        if (w < 0) {
            // Cannot set negative width even if we would want to
            w = 0;
        }
        element.getStyle().setWidth(w, Unit.PX);

    }

    return actualPadding;

}
/**
 * Sets a pixel height on the element such that the total offset height
 * (content + padding + border) equals the requested height, measuring the
 * actual padding + border and correcting the applied height if the guess
 * was off.
 *
 * @param element
 *            the element whose height to set
 * @param requestedHeight
 *            the desired total height in pixels
 * @param verticalPaddingBorderGuess
 *            estimate of the vertical padding + border in pixels
 * @param requestedHeightIncludesPaddingBorder
 *            true if {@code requestedHeight} already includes padding and
 *            border
 * @return the measured vertical padding + border in pixels
 */
public static int setHeightExcludingPaddingAndBorder(Element element,
        int requestedHeight, int verticalPaddingBorderGuess,
        boolean requestedHeightIncludesPaddingBorder) {
    int heightGuess = requestedHeight - verticalPaddingBorderGuess;
    if (heightGuess < 0) {
        heightGuess = 0;
    }

    element.getStyle().setHeight(heightGuess, Unit.PX);
    int captionOffsetHeight = DOM.getElementPropertyInt(element,
            "offsetHeight");

    int actualPadding = captionOffsetHeight - heightGuess;

    if (requestedHeightIncludesPaddingBorder) {
        // NOTE(review): this doubles the measured padding for the
        // offsetHeight-based path — looks intentional but worth verifying
        actualPadding += actualPadding;
    }

    if (actualPadding != verticalPaddingBorderGuess) {
        int h = requestedHeight - actualPadding;
        if (h < 0) {
            // Cannot set negative height even if we would want to
            h = 0;
        }
        element.getStyle().setHeight(h, Unit.PX);

    }

    return actualPadding;

}
/**
 * Sets the CSS float property on the element, using the IE-specific
 * {@code styleFloat} property name on Internet Explorer and the standard
 * {@code cssFloat} elsewhere.
 *
 * @param element
 *            the element to style
 * @param value
 *            the float value to apply (e.g. "left", "right", "none")
 */
public static void setFloat(Element element, String value) {
    final String propertyName = BrowserInfo.get().isIE() ? "styleFloat"
            : "cssFloat";
    element.getStyle().setProperty(propertyName, value);
}
    // Cached native scrollbar size in pixels; -1 until first measured by
    // getNativeScrollbarSize().
    private static int detectedScrollbarSize = -1;

    // Cached sub-pixel rounding factor; -1 until measured.
    // NOTE(review): the code populating this field is not visible in this
    // chunk of the file.
    private static int detectedSubPixelRoundingFactor = -1;
public static int getNativeScrollbarSize() {
if (detectedScrollbarSize < 0) {
Element scroller = DOM.createDiv();
scroller.getStyle().setProperty("width", "50px");
scroller.getStyle().setProperty("height", "50px");
scroller.getStyle().setProperty("overflow", "scroll");
scroller.getStyle().setProperty("position", "absolute");
scroller.getStyle().setProperty("marginLeft", "-5000px");
RootPanel.getBodyElement().appendChild(scroller);
detectedScrollbarSize = scroller.getOffsetWidth()
- scroller.getPropertyInt("clientWidth");
RootPanel.getBodyElement().removeChild(scroller);
}
return detectedScrollbarSize;
}
/**
* Defers the execution of {@link #runWebkitOverflowAutoFix(Element)}.
*
* @since 7.2.6
* @param elem
* with overflow auto
*/
public static void runWebkitOverflowAutoFixDeferred(final Element elem) {
Scheduler.get().scheduleDeferred(
() -> WidgetUtil.runWebkitOverflowAutoFix(elem));
}
    /**
     * Run workaround for webkits overflow auto issue: temporarily hides the
     * element's overflow and restores it at the end of the browser event
     * loop, preserving the scroll position across the toggle.
     *
     * See: our bug #2138 and https://bugs.webkit.org/show_bug.cgi?id=21462
     *
     * @param elem
     *            with overflow auto
     */
    public static void runWebkitOverflowAutoFix(final Element elem) {
        // Add max version if fix lands sometime to Webkit
        // Starting from Opera 11.00, also a problem in Opera
        if (BrowserInfo.get().requiresOverflowAutoFix()) {
            final String originalOverflow = elem.getStyle()
                    .getProperty("overflow");
            final String originalOverflowX = elem.getStyle()
                    .getProperty("overflowX");
            final String originalOverflowY = elem.getStyle()
                    .getProperty("overflowY");
            // A deliberately clipped element must not be toggled: setting and
            // restoring overflow could disturb its intended clipping.
            if ("hidden".equals(originalOverflow)
                    || "hidden".equals(originalOverflowX)
                    || "hidden".equals(originalOverflowY)) {
                return;
            }

            // check the scrolltop value before hiding the element
            final int scrolltop = elem.getScrollTop();
            final int scrollleft = elem.getScrollLeft();
            elem.getStyle().setProperty("overflow", "hidden");

            Scheduler.get().scheduleFinally(() -> {
                // Dough, Safari scroll auto means actually just a moped
                elem.getStyle().setProperty("overflow", originalOverflow);
                // Restore axis-specific overflow only where an explicit
                // inline value existed before the toggle.
                if (!originalOverflowX.isEmpty()) {
                    elem.getStyle().setProperty("overflowX", originalOverflowX);
                }
                if (!originalOverflowY.isEmpty()) {
                    elem.getStyle().setProperty("overflowY", originalOverflowY);
                }

                if (scrolltop > 0 || elem.getScrollTop() > 0) {
                    int scrollvalue = scrolltop;
                    if (scrollvalue == 0) {
                        // mysterious are the ways of webkits scrollbar
                        // handling. In some cases webkit reports bad (0)
                        // scrolltop before hiding the element temporary,
                        // sometimes after.
                        scrollvalue = elem.getScrollTop();
                    }
                    // fix another bug where scrollbar remains in wrong
                    // position
                    elem.setScrollTop(scrollvalue - 1);
                    elem.setScrollTop(scrollvalue);
                }

                // fix for #6940 : Table horizontal scroll sometimes not
                // updated when collapsing/expanding columns
                // Also appeared in Safari 5.1 with webkit 534 (#7667)
                if ((BrowserInfo.get().isChrome()
                        || (BrowserInfo.get().isSafariOrIOS()
                                && BrowserInfo.get().getWebkitVersion() >= 534))
                        && (scrollleft > 0 || elem.getScrollLeft() > 0)) {
                    int scrollvalue = scrollleft;

                    if (scrollvalue == 0) {
                        // mysterious are the ways of webkits scrollbar
                        // handling. In some cases webkit may report a bad
                        // (0) scrollleft before hiding the element
                        // temporary, sometimes after.
                        scrollvalue = elem.getScrollLeft();
                    }
                    // fix another bug where scrollbar remains in wrong
                    // position
                    elem.setScrollLeft(scrollvalue - 1);
                    elem.setScrollLeft(scrollvalue);
                }
            });
        }
    }
public static void alert(String string) {
if (true) {
Window.alert(string);
}
}
/**
* Gets the border-box width for the given element, i.e. element width +
* border + padding. Always rounds up to nearest integer.
*
* @param element
* The element to check
* @return The border-box width for the element
*/
public static int getRequiredWidth(
com.google.gwt.dom.client.Element element) {
int reqWidth = getRequiredWidthBoundingClientRect(element);
if (BrowserInfo.get().isIE()) {
int csSize = getRequiredWidthComputedStyle(element);
if (csSize == reqWidth + 1) {
// If computed style reports one pixel larger than requiredWidth
// we would be rounding in the wrong direction in IE9. Round up
// instead.
// We do not always use csSize as it e.g. for 100% wide Labels
// in GridLayouts produces senseless values (see e.g.
// ThemeTestUI with Runo).
return csSize;
}
}
return reqWidth;
}
/**
* Gets the border-box width for the given element, i.e. element width +
* border + padding.
*
* @since 7.5.1
* @param element
* The element to check
* @return The border-box width for the element
*/
public static double getRequiredWidthDouble(
com.google.gwt.dom.client.Element element) {
double reqWidth = getRequiredWidthBoundingClientRectDouble(element);
if (BrowserInfo.get().isIE()) {
double csWidth = getRequiredWidthComputedStyleDouble(element);
if (csWidth > reqWidth && csWidth <= (reqWidth + 1)) {
// IE9 rounds reqHeight to integers BUT sometimes reports wrong
// csHeight it seems, so we only use csHeight if it is within a
// rounding error
return csWidth;
}
}
return reqWidth;
}
/**
* Gets the border-box height for the given element, i.e. element height +
* border + padding. Always rounds up to nearest integer.
*
* @param element
* The element to check
* @return The border-box height for the element
*/
public static int getRequiredHeight(
com.google.gwt.dom.client.Element element) {
int reqHeight = getRequiredHeightBoundingClientRect(element);
if (BrowserInfo.get().isIE()) {
int csSize = getRequiredHeightComputedStyle(element);
if (csSize == reqHeight + 1) {
// If computed style reports one pixel larger than
// requiredHeight we would be rounding in the wrong direction in
// IE9. Round up instead.
// We do not always use csSize as it e.g. for 100% wide Labels
// in GridLayouts produces senseless values (see e.g.
// ThemeTestUI with Runo).
return csSize;
}
}
return reqHeight;
}
/**
* Gets the border-box height for the given element, i.e. element height +
* border + padding.
*
* @since 7.5.1
* @param element
* The element to check
* @return The border-box height for the element
*/
public static double getRequiredHeightDouble(
com.google.gwt.dom.client.Element element) {
double reqHeight = getRequiredHeightBoundingClientRectDouble(element);
if (BrowserInfo.get().isIE()) {
double csHeight = getRequiredHeightComputedStyleDouble(element);
if (csHeight > reqHeight && csHeight <= (reqHeight + 1)) {
// IE9 rounds reqHeight to integers BUT sometimes reports wrong
// csHeight it seems, so we only use csHeight if it is within a
// rounding error
// Although sometimes it also happens that IE9 returns an
// incorrectly rounded down requiredHeight and a computed height
// which is exactly one larger, hence the "<="...
return csHeight;
}
}
return reqHeight;
}
/**
* Calculates the width of the element's bounding rectangle.
* <p>
* In case the browser doesn't support bounding rectangles, the returned
* value is the offset width.
*
* @param element
* the element of which to calculate the width
* @return the width of the element
*/
public static int getRequiredWidthBoundingClientRect(
com.google.gwt.dom.client.Element element) {
return (int) Math
.ceil(getRequiredWidthBoundingClientRectDouble(element));
}
    /**
     * Calculates the width of the element's bounding rectangle to subpixel
     * precision.
     * <p>
     * In case the browser doesn't support bounding rectangles, the returned
     * value is the offset width.
     * <p>
     * Implementation note: the width is computed as {@code right - left}
     * rather than via {@code rect.width}, presumably for compatibility with
     * browsers lacking the {@code width} property — TODO confirm.
     *
     * @param element
     *            the element of which to calculate the width
     * @return the subpixel-accurate width of the element
     * @since 7.4
     */
    public static native double getRequiredWidthBoundingClientRectDouble(
            com.google.gwt.dom.client.Element element)
    /*-{
        if (element.getBoundingClientRect) {
          var rect = element.getBoundingClientRect();
          return rect.right - rect.left;
        } else {
          return element.offsetWidth;
        }
    }-*/;
public static int getRequiredHeightComputedStyle(
com.google.gwt.dom.client.Element element) {
return (int) Math.ceil(getRequiredHeightComputedStyleDouble(element));
}
    /**
     * Calculates the border-box height (content + border + padding) of the
     * element from its computed style, to subpixel precision.
     * <p>
     * When the computed height is {@code auto} (e.g. for inline elements),
     * falls back to the bounding-client-rect measurement.
     *
     * @param element
     *            the element to measure
     * @return the subpixel-accurate computed-style height
     */
    public static native double getRequiredHeightComputedStyleDouble(
            com.google.gwt.dom.client.Element element)
    /*-{
         var cs = element.ownerDocument.defaultView.getComputedStyle(element);
         var heightPx = cs.height;
         if (heightPx == 'auto') {
             // Fallback for inline elements
             return @com.vaadin.client.WidgetUtil::getRequiredHeightBoundingClientRectDouble(Lcom/google/gwt/dom/client/Element;)(element);
         }
         var height = parseFloat(heightPx); // Will automatically skip "px" suffix
         var border = parseFloat(cs.borderTopWidth) + parseFloat(cs.borderBottomWidth); // Will automatically skip "px" suffix
         var padding = parseFloat(cs.paddingTop) + parseFloat(cs.paddingBottom); // Will automatically skip "px" suffix
         return height+border+padding;
     }-*/;
public static int getRequiredWidthComputedStyle(
com.google.gwt.dom.client.Element element) {
return (int) Math.ceil(getRequiredWidthComputedStyleDouble(element));
}
    /**
     * Calculates the border-box width (content + border + padding) of the
     * element from its computed style.
     * <p>
     * When the computed width is {@code auto} (e.g. for inline elements),
     * falls back to the bounding-client-rect measurement.
     * <p>
     * NOTE(review): despite the "Double" suffix, this method is declared to
     * return {@code int}, so any fractional part of the JSNI result is
     * truncated — inconsistent with
     * {@code getRequiredHeightComputedStyleDouble}, which returns
     * {@code double}. Changing the return type would alter the public
     * signature, so it is left as-is; verify against callers before fixing.
     *
     * @param element
     *            the element to measure
     * @return the computed-style width (fraction truncated, see note)
     */
    public static native int getRequiredWidthComputedStyleDouble(
            com.google.gwt.dom.client.Element element)
    /*-{
         var cs = element.ownerDocument.defaultView.getComputedStyle(element);
         var widthPx = cs.width;
         if (widthPx == 'auto') {
             // Fallback for inline elements
             return @com.vaadin.client.WidgetUtil::getRequiredWidthBoundingClientRectDouble(Lcom/google/gwt/dom/client/Element;)(element);
         }
         var width = parseFloat(widthPx); // Will automatically skip "px" suffix
         var border = parseFloat(cs.borderLeftWidth) + parseFloat(cs.borderRightWidth); // Will automatically skip "px" suffix
         var padding = parseFloat(cs.paddingLeft) + parseFloat(cs.paddingRight); // Will automatically skip "px" suffix
         return width+border+padding;
     }-*/;
/**
* Calculates the height of the element's bounding rectangle.
* <p>
* In case the browser doesn't support bounding rectangles, the returned
* value is the offset height.
*
* @param element
* the element of which to calculate the height
* @return the height of the element
*/
public static int getRequiredHeightBoundingClientRect(
com.google.gwt.dom.client.Element element) {
return (int) Math
.ceil(getRequiredHeightBoundingClientRectDouble(element));
}
    /**
     * Calculates the height of the element's bounding rectangle to subpixel
     * precision.
     * <p>
     * In case the browser doesn't support bounding rectangles, the returned
     * value is the offset height.
     * <p>
     * Implementation note: the height is computed as {@code bottom - top}
     * rather than via {@code rect.height}, presumably for compatibility with
     * browsers lacking the {@code height} property — TODO confirm.
     *
     * @param element
     *            the element of which to calculate the height
     * @return the subpixel-accurate height of the element
     * @since 7.4
     */
    public static native double getRequiredHeightBoundingClientRectDouble(
            com.google.gwt.dom.client.Element element)
    /*-{
        var height;
        if (element.getBoundingClientRect != null) {
          var rect = element.getBoundingClientRect();
          height = rect.bottom - rect.top;
        } else {
          height = element.offsetHeight;
        }
        return height;
    }-*/;
    /**
     * Gets the border-box width of the widget's root element. Convenience
     * overload of {@link #getRequiredWidth(com.google.gwt.dom.client.Element)}.
     *
     * @param widget
     *            the widget to measure
     * @return the border-box width of the widget's element
     */
    public static int getRequiredWidth(Widget widget) {
        return getRequiredWidth(widget.getElement());
    }
    /**
     * Gets the border-box height of the widget's root element. Convenience
     * overload of
     * {@link #getRequiredHeight(com.google.gwt.dom.client.Element)}.
     *
     * @param widget
     *            the widget to measure
     * @return the border-box height of the widget's element
     */
    public static int getRequiredHeight(Widget widget) {
        return getRequiredHeight(widget.getElement());
    }
/**
* Detects what is currently the overflow style attribute in given element.
*
* @param pe
* the element to detect
* @return true if auto or scroll
*/
public static boolean mayHaveScrollBars(
com.google.gwt.dom.client.Element pe) {
String overflow = getComputedStyle(pe, "overflow");
if (overflow != null) {
return overflow.equals("auto") || overflow.equals("scroll");
} else {
return false;
}
}
    /**
     * A simple helper method to detect "computed style" (aka style sheets +
     * element styles). Values returned differ a lot depending on browsers.
     * Always be very careful when using this.
     *
     * @param el
     *            the element from which the style property is detected
     * @param p
     *            the property to detect
     * @return String value of style property, or an empty string when the
     *         browser supports neither {@code currentStyle} nor
     *         {@code getComputedStyle} or when the lookup throws
     */
    private static native String getComputedStyle(
            com.google.gwt.dom.client.Element el, String p)
    /*-{
        try {

          if (el.currentStyle) {
            // IE
            return el.currentStyle[p];
          } else if (window.getComputedStyle) {
            // Sa, FF, Opera
            var view = el.ownerDocument.defaultView;
            return view.getComputedStyle(el,null).getPropertyValue(p);
          } else {
            // fall back for non IE, Sa, FF, Opera
            return "";
          }
        } catch (e) {
          return "";
        }
    }-*/;
    /**
     * Will (attempt) to focus the given DOM Element. Any exception thrown by
     * the browser (e.g. for elements that cannot receive focus) is silently
     * swallowed.
     *
     * @param el
     *            the element to focus
     */
    public static native void focus(Element el)
    /*-{
        try {
            el.focus();
        } catch (e) {
        }
    }-*/;
    /**
     * Helper method to find first instance of any Widget found by traversing
     * DOM upwards from given element.
     * <p>
     * <strong>Note:</strong> If {@code element} is inside some widget {@code W}
     * , <em>and</em> {@code W} in turn is wrapped in a
     * {@link com.google.gwt.user.client.ui.Composite Composite} {@code C}, this
     * method will not find {@code W} but returns {@code C}. This may also be
     * the case with other Composite-like classes that hijack the event handling
     * of their child widget(s).
     *
     * @param element
     *            the element where to start seeking of Widget
     * @return the first widget found from the element or its ancestors, or
     *         null if none is found
     * @since 8.1
     */
    @SuppressWarnings("unchecked")
    public static <T> T findWidget(Element element) {
        return findWidget(element, null);
    }
    /**
     * Helper method to find first instance of given Widget type found by
     * traversing DOM upwards from given element.
     * <p>
     * <strong>Note:</strong> If {@code element} is inside some widget {@code W}
     * , <em>and</em> {@code W} in turn is wrapped in a
     * {@link com.google.gwt.user.client.ui.Composite Composite} {@code C}, this
     * method will not find {@code W}. It returns either {@code C} or null,
     * depending on whether the class parameter matches. This may also be the
     * case with other Composite-like classes that hijack the event handling of
     * their child widget(s).
     * <p>
     * Only accepts the exact class {@code class1} if not null.
     *
     * @param element
     *            the element where to start seeking of Widget
     * @param class1
     *            the Widget type to seek for, null for any
     * @return the first matching widget, or null if none is found
     */
    @SuppressWarnings("unchecked")
    public static <T> T findWidget(Element element,
            Class<? extends Widget> class1) {
        return findWidget(element, class1, true);
    }
/**
* Helper method to find first instance of given Widget type found by
* traversing DOM upwards from given element.
* <p>
* <strong>Note:</strong> If {@code element} is inside some widget {@code W}
* , <em>and</em> {@code W} in turn is wrapped in a
* {@link com.google.gwt.user.client.ui.Composite Composite} {@code C}, this
* method will not find {@code W}. It returns either {@code C} or null,
* depending on whether the class parameter matches. This may also be the
* case with other Composite-like classes that hijack the event handling of
* their child widget(s).
*
* @param element
* the element where to start seeking of Widget
* @param class1
* the Widget type to seek for
* @param exactMatch
* true to only accept class1, false to also accept its
* superclasses
* @since 8.1
*/
@SuppressWarnings("unchecked")
public static <T> T findWidget(Element element,
Class<? extends Widget> class1, boolean exactMatch) {
if (element != null) {
/* First seek for the first EventListener (~Widget) from dom */
EventListener eventListener = null;
while (eventListener == null && element != null) {
eventListener = Event.getEventListener(element);
if (eventListener == null
|| !(eventListener instanceof Widget)) {
element = element.getParentElement();
eventListener = null;
}
}
if (eventListener instanceof Widget) {
/*
* Then find the first widget of type class1 from widget
* hierarchy
*/
Widget w = (Widget) eventListener;
if (class1 == null && w != null) {
return (T) w;
}
while (w != null) {
Class<?> widgetClass = w.getClass();
while (widgetClass != null) {
if (widgetClass == class1) {
return (T) w;
}
// terminate after first check if looking for exact
// match
widgetClass = exactMatch ? null
: widgetClass.getSuperclass();
}
w = w.getParent();
}
}
}
return null;
}
/**
* Force webkit to redraw an element.
*
* @param element
* The element that should be redrawn
*/
public static void forceWebkitRedraw(Element element) {
Style style = element.getStyle();
String s = style.getProperty("webkitTransform");
if (s == null || s.isEmpty()) {
style.setProperty("webkitTransform", "scale(1)");
} else {
style.setProperty("webkitTransform", "");
}
}
/**
* Performs a hack to trigger a re-layout in the IE browser. This is usually
* necessary in cases where IE "forgets" to update child elements when they
* resize.
*
* @since 7.3
* @param e
* The element to perform the hack on
*/
public static void forceIERedraw(Element e) {
if (BrowserInfo.get().isIE()) {
setStyleTemporarily(e, "zoom", "1");
}
}
/**
* Detaches and re-attaches the element from its parent. The element is
* reattached at the same position in the DOM as it was before.
*
* Does nothing if the element is not attached to the DOM.
*
* @param element
* The element to detach and re-attach
*/
public static void detachAttach(Element element) {
if (element == null) {
return;
}
Node nextSibling = element.getNextSibling();
Node parent = element.getParentNode();
if (parent == null) {
return;
}
parent.removeChild(element);
if (nextSibling == null) {
parent.appendChild(element);
} else {
parent.insertBefore(element, nextSibling);
}
}
public static void sinkOnloadForImages(Element element) {
NodeList<com.google.gwt.dom.client.Element> imgElements = element
.getElementsByTagName("img");
for (int i = 0; i < imgElements.getLength(); i++) {
DOM.sinkEvents(imgElements.getItem(i), Event.ONLOAD);
}
}
/**
* Returns the index of the childElement within its parent.
*
* @param subElement
* @return
*/
public static int getChildElementIndex(Element childElement) {
int idx = 0;
Node n = childElement;
while ((n = n.getPreviousSibling()) != null) {
idx++;
}
return idx;
}
/**
* Temporarily sets the {@code styleProperty} to {@code tempValue} and then
* resets it to its current value. Used mainly to work around rendering
* issues in IE (and possibly in other browsers)
*
* @param element
* The target element
* @param styleProperty
* The name of the property to set
* @param tempValue
* The temporary value
*/
public static void setStyleTemporarily(Element element,
final String styleProperty, String tempValue) {
final Style style = element.getStyle();
final String currentValue = style.getProperty(styleProperty);
style.setProperty(styleProperty, tempValue);
// Read a style-based property to force the browser to recalculate the
// element's dimensions with the temporary style.
element.getOffsetWidth();
style.setProperty(styleProperty, currentValue);
}
/**
* A helper method to return the client position from an event. Returns
* position from either first changed touch (if touch event) or from the
* event itself.
*
* @param event
* @return
*/
public static int getTouchOrMouseClientX(Event event) {
if (isTouchEvent(event)) {
return event.getChangedTouches().get(0).getClientX();
} else {
return event.getClientX();
}
}
/**
* Find the element corresponding to the coordinates in the passed mouse
* event. Please note that this is not always the same as the target of the
* event e.g. if event capture is used.
*
* @param event
* the mouse event to get coordinates from
* @return the element at the coordinates of the event
*/
public static Element getElementUnderMouse(NativeEvent event) {
int pageX = getTouchOrMouseClientX(event);
int pageY = getTouchOrMouseClientY(event);
return getElementFromPoint(pageX, pageY);
}
/**
* A helper method to return the client position from an event. Returns
* position from either first changed touch (if touch event) or from the
* event itself.
*
* @param event
* @return
*/
public static int getTouchOrMouseClientY(Event event) {
if (isTouchEvent(event)) {
return event.getChangedTouches().get(0).getClientY();
} else {
return event.getClientY();
}
}
    /**
     * Returns the client Y position from a native event by delegating to
     * {@link #getTouchOrMouseClientY(Event)}.
     *
     * @see #getTouchOrMouseClientY(Event)
     * @param currentGwtEvent
     *            the native event to read the position from
     * @return the client Y coordinate
     */
    public static int getTouchOrMouseClientY(NativeEvent currentGwtEvent) {
        return getTouchOrMouseClientY(Event.as(currentGwtEvent));
    }
    /**
     * Returns the client X position from a native event by delegating to
     * {@link #getTouchOrMouseClientX(Event)}.
     *
     * @see #getTouchOrMouseClientX(Event)
     *
     * @param event
     *            the native event to read the position from
     * @return the client X coordinate
     */
    public static int getTouchOrMouseClientX(NativeEvent event) {
        return getTouchOrMouseClientX(Event.as(event));
    }
    /**
     * Checks whether the given event is a touch event, based on the event
     * type name containing "touch".
     *
     * @param event
     *            the event to check
     * @return true if the event type contains "touch"
     */
    public static boolean isTouchEvent(Event event) {
        return event.getType().contains("touch");
    }
    /**
     * Checks whether the given native event is a touch event.
     *
     * @param event
     *            the native event to check
     * @return true if the event type contains "touch"
     */
    public static boolean isTouchEvent(NativeEvent event) {
        return isTouchEvent(Event.as(event));
    }
    /**
     * Synthesizes mouse-down, mouse-up and click events from the first
     * changed touch of the given touch event and dispatches them (deferred)
     * to the element under the touch point. Also attempts to focus the widget
     * under the touch point first.
     *
     * @param touchevent
     *            the touch event to convert
     * @param widget
     *            the widget associated with the event (currently unused by
     *            the dispatch logic below)
     */
    public static void simulateClickFromTouchEvent(Event touchevent,
            Widget widget) {
        Touch touch = touchevent.getChangedTouches().get(0);
        // Build the full mouse event sequence up front from the touch
        // coordinates, using the left button and no modifier keys.
        final NativeEvent createMouseUpEvent = Document.get()
                .createMouseUpEvent(0, touch.getScreenX(), touch.getScreenY(),
                        touch.getClientX(), touch.getClientY(), false, false,
                        false, false, NativeEvent.BUTTON_LEFT);
        final NativeEvent createMouseDownEvent = Document.get()
                .createMouseDownEvent(0, touch.getScreenX(), touch.getScreenY(),
                        touch.getClientX(), touch.getClientY(), false, false,
                        false, false, NativeEvent.BUTTON_LEFT);
        final NativeEvent createMouseClickEvent = Document.get()
                .createClickEvent(0, touch.getScreenX(), touch.getScreenY(),
                        touch.getClientX(), touch.getClientY(), false, false,
                        false, false);

        /*
         * Get target with element from point as we want the actual element, not
         * the one that sunk the event.
         */
        final Element target = getElementFromPoint(touch.getClientX(),
                touch.getClientY());

        /*
         * Fixes infocusable form fields in Safari of iOS 5.x and some Android
         * browsers.
         */
        Widget targetWidget = findWidget(target);
        if (targetWidget instanceof com.google.gwt.user.client.ui.Focusable) {
            final com.google.gwt.user.client.ui.Focusable toBeFocusedWidget = (com.google.gwt.user.client.ui.Focusable) targetWidget;
            toBeFocusedWidget.setFocus(true);
        } else if (targetWidget instanceof Focusable) {
            ((Focusable) targetWidget).focus();
        }

        // Dispatch down -> up -> click in order, deferred so the original
        // touch event finishes processing first. Dispatch failures are
        // deliberately ignored (best-effort simulation).
        Scheduler.get().scheduleDeferred(() -> {
            try {
                target.dispatchEvent(createMouseDownEvent);
                target.dispatchEvent(createMouseUpEvent);
                target.dispatchEvent(createMouseClickEvent);
            } catch (Exception e) {
            }

        });

    }
    /**
     * Gets the currently focused element by reading
     * {@code document.activeElement}.
     *
     * @return The active element or null if no active element could be found.
     */
    public static native Element getFocusedElement()
    /*-{
       if ($wnd.document.activeElement) {
           return $wnd.document.activeElement;
       }

       return null;
     }-*/;
/**
* Gets currently focused element and checks if it's editable.
*
* @since 7.4
*
* @return true if focused element is editable
*/
public static boolean isFocusedElementEditable() {
Element focusedElement = WidgetUtil.getFocusedElement();
if (focusedElement != null) {
String tagName = focusedElement.getTagName();
String contenteditable = focusedElement
.getAttribute("contenteditable");
return "textarea".equalsIgnoreCase(tagName)
|| "input".equalsIgnoreCase(tagName)
|| "true".equalsIgnoreCase(contenteditable);
}
return false;
}
/**
* Kind of stronger version of isAttached(). In addition to std isAttached,
* this method checks that this widget nor any of its parents is hidden. Can
* be e.g used to check whether component should react to some events or
* not.
*
* @param widget
* @return true if attached and displayed
*/
public static boolean isAttachedAndDisplayed(Widget widget) {
if (widget.isAttached()) {
/*
* Failfast using offset size, then by iterating the widget tree
*/
boolean notZeroSized = widget.getOffsetHeight() > 0
|| widget.getOffsetWidth() > 0;
return notZeroSized || checkVisibilityRecursively(widget);
} else {
return false;
}
}
private static boolean checkVisibilityRecursively(Widget widget) {
if (widget.isVisible()) {
Widget parent = widget.getParent();
if (parent == null) {
return true; // root panel
} else {
return checkVisibilityRecursively(parent);
}
} else {
return false;
}
}
    /**
     * Scrolls an element into view vertically only. Modified version of
     * Element.scrollIntoView. Only the {@code scrollTop} of ancestor elements
     * is adjusted; horizontal scroll positions are left untouched.
     *
     * @param elem
     *            The element to scroll into view
     */
    public static native void scrollIntoViewVertically(Element elem)
    /*-{
        var top = elem.offsetTop;
        var height = elem.offsetHeight;

        if (elem.parentNode != elem.offsetParent) {
          top -= elem.parentNode.offsetTop;
        }

        var cur = elem.parentNode;
        while (cur && (cur.nodeType == 1)) {
          if (top < cur.scrollTop) {
            cur.scrollTop = top;
          }
          if (top + height > cur.scrollTop + cur.clientHeight) {
            cur.scrollTop = (top + height) - cur.clientHeight;
          }

          var offsetTop = cur.offsetTop;
          if (cur.parentNode != cur.offsetParent) {
            offsetTop -= cur.parentNode.offsetTop;
          }

          top += offsetTop - cur.scrollTop;
          cur = cur.parentNode;
        }
     }-*/;
/**
* Checks if the given event is either a touch event or caused by the left
* mouse button.
*
* @param event
* @return true if the event is a touch event or caused by the left mouse
* button, false otherwise
*/
public static boolean isTouchEventOrLeftMouseButton(Event event) {
boolean touchEvent = WidgetUtil.isTouchEvent(event);
return touchEvent || event.getButton() == Event.BUTTON_LEFT;
}
/**
* Resolve a relative URL to an absolute URL based on the current document's
* location.
*
* @param url
* a string with the relative URL to resolve
* @return the corresponding absolute URL as a string
*/
public static String getAbsoluteUrl(String url) {
AnchorElement a = Document.get().createAnchorElement();
a.setHref(url);
return a.getHref();
}
    /**
     * Sets the selection range of an input element.
     *
     * We need this JSNI function to set selection range so that we can use the
     * optional direction attribute to set the anchor to the end and the focus
     * to the start. This makes Firefox work the same way as other browsers
     * (#13477)
     * <p>
     * Failures are silently ignored (e.g. Firefox throws if the text box is
     * not visible, even if attached).
     *
     * @param elem
     *            the html input element.
     * @param pos
     *            the index of the first selected character.
     * @param length
     *            the selection length.
     * @param direction
     *            a string indicating the direction in which the selection was
     *            performed. This may be "forward" or "backward", or "none" if
     *            the direction is unknown or irrelevant.
     *
     * @since 7.3
     */
    public static native void setSelectionRange(Element elem, int pos,
            int length, String direction)
    /*-{
       try {
           elem.setSelectionRange(pos, pos + length, direction);
       } catch (e) {
          // Firefox throws exception if TextBox is not visible, even if attached
       }
    }-*/;
    /**
     * JavaScript hack to prevent text selection in various browsers: toggles
     * the element's {@code ondrag}/{@code onselectstart} handlers and the
     * {@code webkitUserSelect} style.
     *
     * @since 7.6
     * @param e
     *            element for enabling or disabling text selection
     * @param enable
     *            <code>true</code> if selection is enabled; <code>false</code>
     *            if not
     */
    public static native void setTextSelectionEnabled(Element e, boolean enable)
    /*-{
        if (!enable) {
            e.ondrag = function () { return false; };
            e.onselectstart = function () { return false; };
            e.style.webkitUserSelect = "none";
        } else {
            e.ondrag = null;
            e.onselectstart = null;
            e.style.webkitUserSelect = "text";
        }
    }-*/;
    /**
     * JavaScript hack to clear text selection in various browsers. Removes
     * all ranges from the window selection when the Selection API is
     * available; does nothing otherwise.
     *
     * @since 7.6
     */
    public static native void clearTextSelection()
    /*-{
        if ($wnd.getSelection) {
            $wnd.getSelection().removeAllRanges();
        }
    }-*/;
/**
* The allowed value inaccuracy when comparing two double-typed pixel
* values.
* <p>
* Since we're comparing pixels on a screen, epsilon must be less than 1.
* 0.49 was deemed a perfectly fine and beautifully round number.
*/
public static final double PIXEL_EPSILON = 0.49d;
/**
* Compares two double values with the error margin of
* {@link #PIXEL_EPSILON} (i.e. {@value #PIXEL_EPSILON})
*
* @param num1
* the first value for which to compare equality
* @param num2
* the second value for which to compare equality
* @since 7.4
*
* @return true if the values are considered equals; false otherwise
*/
public static boolean pixelValuesEqual(final double num1,
final double num2) {
return Math.abs(num1 - num2) <= PIXEL_EPSILON;
}
    /**
     * Returns the bounding client rectangle of the given element as reported
     * by the browser's {@code getBoundingClientRect()}.
     *
     * @param e
     *            the element to measure
     * @return the element's bounding rectangle
     */
    public static native TextRectangle getBoundingClientRect(Element e)
    /*-{
        return e.getBoundingClientRect();
    }-*/;
    /**
     * JavaScript overlay type wrapping the native rectangle object returned
     * by {@code getBoundingClientRect()}. All coordinates are in pixels,
     * relative to the viewport.
     */
    public static final class TextRectangle extends JavaScriptObject {
        protected TextRectangle() {
        }

        /** @return the bottom coordinate of the rectangle */
        public native double getBottom()
        /*-{
            return this.bottom;
        }-*/;

        /** @return the height of the rectangle */
        public native double getHeight()
        /*-{
            return this.height;
        }-*/;

        /** @return the left coordinate of the rectangle */
        public native double getLeft()
        /*-{
            return this.left;
        }-*/;

        /** @return the right coordinate of the rectangle */
        public native double getRight()
        /*-{
            return this.right;
        }-*/;

        /** @return the top coordinate of the rectangle */
        public native double getTop()
        /*-{
            return this.top;
        }-*/;

        /** @return the width of the rectangle */
        public native double getWidth()
        /*-{
            return this.width;
        }-*/;
    }
    /**
     * Wrap a css size value and its unit and translate back and forth to the
     * string representation.<br/>
     * E.g. 50%, 123px, ...
     *
     * @since 7.2.6
     * @author Vaadin Ltd
     */
    @SuppressWarnings("serial")
    public static class CssSize implements Serializable {

        /*
         * Map the size units with their type.
         */
        private static Map<String, Unit> type2Unit = new HashMap<>();
        static {
            for (Unit unit : Unit.values()) {
                type2Unit.put(unit.getType(), unit);
            }
        }

        /**
         * Gets the unit value by its type.
         *
         * @param type
         *            the type of the unit as found in the style.
         * @return the unit value, or null if the type matches no known unit.
         */
        public static Unit unitByType(String type) {
            return type2Unit.get(type);
        }

        /*
         * Regex to parse the size.
         */
        private static final RegExp SIZE_PATTERN = RegExp
                .compile(SharedUtil.SIZE_PATTERN);

        /**
         * Parse the size from string format to {@link CssSize}.
         *
         * @param s
         *            the size as string.
         * @return a {@link CssSize} object, or null when the input is null or
         *         blank.
         * @throws IllegalArgumentException
         *             if the input does not match the size pattern.
         */
        public static CssSize fromString(String s) {
            if (s == null) {
                return null;
            }

            s = s.trim();
            if (s.isEmpty()) {
                return null;
            }

            float size = 0;
            Unit unit = null;

            MatchResult matcher = SIZE_PATTERN.exec(s);
            // NOTE(review): exec returns null when nothing matches, which
            // would throw NPE here rather than the IllegalArgumentException
            // below - confirm inputs are pre-validated by callers.
            if (matcher.getGroupCount() > 1) {

                size = Float.parseFloat(matcher.getGroup(1));
                if (size < 0) {
                    // Negative sizes are normalized to -1 px.
                    // NOTE(review): this silently discards the parsed unit
                    // for negative values - confirm this is intentional.
                    size = -1;
                    unit = Unit.PX;

                } else {
                    String symbol = matcher.getGroup(2);
                    // May be null when the symbol is empty or unknown.
                    unit = unitByType(symbol);
                }
            } else {
                throw new IllegalArgumentException(
                        "Invalid size argument: \"" + s + "\" (should match "
                                + SIZE_PATTERN.getSource() + ")");
            }
            return new CssSize(size, unit);
        }

        /**
         * Creates a {@link CssSize} using a value and its measurement unit.
         *
         * @param value
         *            the value.
         * @param unit
         *            the unit.
         * @return the {@link CssSize} object.
         */
        public static CssSize fromValueUnit(float value, Unit unit) {
            return new CssSize(value, unit);
        }

        /*
         * The value.
         */
        private final float value;

        /*
         * The measure unit.
         */
        private final Unit unit;

        private CssSize(float value, Unit unit) {
            this.value = value;
            this.unit = unit;
        }

        /**
         * Gets the value for this css size.
         *
         * @return the value.
         */
        public float getValue() {
            return value;
        }

        /**
         * Gets the measurement unit for this css size.
         *
         * @return the unit.
         */
        public Unit getUnit() {
            return unit;
        }

        @Override
        public String toString() {
            // NOTE(review): throws NPE when unit is null (possible via
            // fromString with an unknown unit symbol) - verify callers.
            return value + unit.getType();
        }

        @Override
        public boolean equals(Object obj) {
            if (obj instanceof CssSize) {
                CssSize size = (CssSize) obj;
                return size.value == value && size.unit == unit;
            }
            return false;
        }

        @Override
        public int hashCode() {
            final int prime = 31;
            int result = 1;
            // Fractional part of the value is truncated for hashing; equal
            // sizes still hash equally since equals compares exact floats.
            result = prime * result + (int) value;
            result = prime * result + ((unit == null) ? 0 : unit.hashCode());
            return result;
        }

        /**
         * Check whether the two sizes are equals.
         *
         * @param cssSize1
         *            the first size to compare.
         * @param cssSize2
         *            the other size to compare with the first one.
         * @return true if the two sizes are equals, otherwise false.
         */
        public static boolean equals(String cssSize1, String cssSize2) {
            // NOTE(review): throws NPE when cssSize1 is null or blank, since
            // fromString then returns null - verify callers.
            return CssSize.fromString(cssSize1)
                    .equals(CssSize.fromString(cssSize2));
        }

    }
    /**
     * Returns the logger for this utility class.
     *
     * @return the class-specific logger instance
     */
    private static Logger getLogger() {
        return Logger.getLogger(WidgetUtil.class.getName());
    }
/**
* Returns the thickness of the given element's top border.
* <p>
* The value is determined using computed style when available and
* calculated otherwise.
*
* @since 7.5.0
* @param element
* the element to measure
* @return the top border thickness
*/
public static double getBorderTopThickness(Element element) {
return getBorderThickness(element, new String[] { "borderTopWidth" });
}
/**
* Returns the thickness of the given element's bottom border.
* <p>
* The value is determined using computed style when available and
* calculated otherwise.
*
* @since 7.5.0
* @param element
* the element to measure
* @return the bottom border thickness
*/
public static double getBorderBottomThickness(Element element) {
return getBorderThickness(element,
new String[] { "borderBottomWidth" });
}
/**
* Measures the combined thickness of the given element's top and bottom
* borders.
* <p>
* The value is determined using computed style when available and
* calculated otherwise.
*
* @since 7.5.0
* @param element
*            the element to measure
* @return the top and bottom border thickness
*/
public static double getBorderTopAndBottomThickness(Element element) {
    String[] properties = { "borderTopWidth", "borderBottomWidth" };
    return getBorderThickness(element, properties);
}
/**
* Measures how thick the given element's left border is.
* <p>
* The value is determined using computed style when available and
* calculated otherwise.
*
* @since 7.5.0
* @param element
*            the element to measure
* @return the left border thickness
*/
public static double getBorderLeftThickness(Element element) {
    String[] properties = { "borderLeftWidth" };
    return getBorderThickness(element, properties);
}
/**
* Measures how thick the given element's right border is.
* <p>
* The value is determined using computed style when available and
* calculated otherwise.
*
* @since 7.5.0
* @param element
*            the element to measure
* @return the right border thickness
*/
public static double getBorderRightThickness(Element element) {
    String[] properties = { "borderRightWidth" };
    return getBorderThickness(element, properties);
}
/**
* Returns the combined thickness of the given element's left and right
* borders.
* <p>
* The value is determined using computed style when available and
* calculated otherwise.
*
* @since 7.5.0
* @param element
*            the element to measure
* @return the left and right border thickness
*/
public static double getBorderLeftAndRightThickness(Element element) {
return getBorderThickness(element,
new String[] { "borderLeftWidth", "borderRightWidth" });
}
/**
* Sums the widths of the given border properties of an element.
* <p>
* Uses computed style when the browser provides it; otherwise measures a
* clone of the element with and without its borders and returns the
* difference.
*
* @param element
*            the element to measure
* @param borderNames
*            camel-cased CSS border width property names to sum
* @return the combined border thickness in pixels
*/
private static native double getBorderThickness(
        com.google.gwt.dom.client.Element element, String[] borderNames)
/*-{
    if (typeof $wnd.getComputedStyle === 'function') {
        var computedStyle = $wnd.getComputedStyle(element);
        var width = 0;
        // FIX: declare the loop variable - the original "for (i=0;...)"
        // leaked "i" as an implicit global in non-strict JS.
        for (var i = 0; i < borderNames.length; i++) {
            var borderWidth = computedStyle[borderNames[i]];
            width += parseFloat(borderWidth);
        }
        return width;
    } else {
        // No computed style available: measure a borderless clone.
        var parentElement = element.offsetParent;
        var cloneElement = element.cloneNode(false);
        cloneElement.style.boxSizing = "content-box";
        parentElement.appendChild(cloneElement);
        var heightWithBorder = cloneElement.offsetHeight;
        for (var i = 0; i < borderNames.length; i++) {
            cloneElement.style[borderNames[i]] = "0";
        }
        var heightWithoutBorder = cloneElement.offsetHeight;
        parentElement.removeChild(cloneElement);
        return heightWithBorder - heightWithoutBorder;
    }
}-*/;
/**
* Rounds the given size upwards to the nearest value the browser can
* actually represent.
*
* Safari/WebKit uses 1/64th of a pixel to enable using integer math
* (http://trac.webkit.org/wiki/LayoutUnit).
*
* Firefox uses 1/60th of a pixel because it is divisible by three
* (https://bugzilla.mozilla.org/show_bug.cgi?id=1070940)
*
* @since 7.5.1
* @param size
*            the value to round
* @return the rounded value
*/
public static double roundSizeUp(double size) {
    final boolean roundUpwards = true;
    return roundSize(size, roundUpwards);
}
/**
* Rounds the given size downwards to the nearest value the browser can
* actually represent.
*
* Safari/WebKit uses 1/64th of a pixel to enable using integer math
* (http://trac.webkit.org/wiki/LayoutUnit).
*
* Firefox uses 1/60th of a pixel because it is divisible by three
* (https://bugzilla.mozilla.org/show_bug.cgi?id=1070940)
*
* IE9+ uses 1/100th of a pixel
*
* @since 7.5.1
* @param size
*            the value to round
* @return the rounded value
*/
public static double roundSizeDown(double size) {
    final boolean roundUpwards = false;
    return roundSize(size, roundUpwards);
}
/**
* Rounds a size to the browser's sub-pixel grid in the requested
* direction. Negative sizes, or an unknown rounding factor, are passed
* through unchanged.
*/
private static double roundSize(double size, boolean roundUp) {
    double factor = getSubPixelRoundingFactor();
    if (factor < 0 || size < 0) {
        return size;
    }
    return roundUp ? roundSizeUp(size, factor) : roundSizeDown(size, factor);
}
/**
* Returns the factor used by browsers to round subpixel values
*
* @since 7.5.1
* @return the factor N used by the browser when storing subpixels as X+Y/N
*/
private static double getSubPixelRoundingFactor() {
// Detects how the browser does subpixel rounding
// Currently Firefox uses 1/60th pixels
// and Safari uses 1/64th pixels
// IE 1/100th pixels
// Result is cached after the first probe; -1 means "not yet detected".
if (detectedSubPixelRoundingFactor != -1) {
return detectedSubPixelRoundingFactor;
}
// Probe: give a throwaway div a height just below 1px and observe how
// the browser quantizes it.
double probeSize = 0.999999;
DivElement div = Document.get().createDivElement();
Document.get().getBody().appendChild(div);
div.getStyle().setHeight(probeSize, Unit.PX);
ComputedStyle computedStyle = new ComputedStyle(div);
double computedHeight = computedStyle.getHeight();
if (computedHeight < probeSize) {
// Rounded down by browser, all browsers but Firefox do this
// today
// (1 - computedHeight) is one quantization step; its inverse is N.
detectedSubPixelRoundingFactor = (int) Math
.round(1.0 / (1.0 - computedHeight));
} else {
// Rounded up / to nearest by browser
// Halve the probe until it quantizes to zero; the last non-zero
// computed height equals one quantization step.
probeSize = 1;
while (computedStyle.getHeight() != 0.0) {
computedHeight = computedStyle.getHeight();
probeSize /= 2.0;
div.getStyle().setHeight(probeSize, Unit.PX);
}
detectedSubPixelRoundingFactor = (int) Math
.round(1.0 / computedHeight);
}
div.removeFromParent();
return detectedSubPixelRoundingFactor;
}
/**
* Rounds a size upwards onto a 1/divisor sub-pixel grid: the whole-pixel
* part is kept and the fractional part is rounded up to the next
* representable step. Example: roundSizeUp(12.51, 60) = 12 + 31/60.
*/
private static double roundSizeUp(double size, double divisor) {
    // Whole pixels (truncated toward zero).
    double whole = (int) size;
    // Fraction expressed in grid steps, e.g. 0.51 * 60 = 30.6 steps.
    double steps = (size - whole) * divisor;
    // Round the steps up and convert back to pixels.
    return whole + Math.ceil(steps) / divisor;
}
/**
* Rounds a size downwards onto a 1/divisor sub-pixel grid: the
* whole-pixel part is kept and the fractional part is truncated to the
* previous representable step. Example: roundSizeDown(12.51, 60) = 12.5.
*/
private static double roundSizeDown(double size, double divisor) {
    // Whole pixels (truncated toward zero).
    double whole = (int) size;
    // Fraction expressed in grid steps, e.g. 0.51 * 60 = 30.6 steps.
    double steps = (size - whole) * divisor;
    // Truncate the steps and convert back to pixels.
    return whole + ((int) steps) / divisor;
}
/**
* Returns the X coordinate of an event relative to an element.
*
* @param element
*            base element of the relative coordinates
* @param event
*            with touch or mouse coordinates
* @return relative X coordinate
* @since 8.1
*/
public static int getRelativeX(Element element, NativeEvent event) {
    int clientX = WidgetUtil.getTouchOrMouseClientX(event);
    return clientX - (element.getAbsoluteLeft() - Window.getScrollLeft());
}
/**
* Returns the Y coordinate of an event relative to an element.
*
* @param element
*            base element of the relative coordinates
* @param event
*            with touch or mouse coordinates
* @return relative Y coordinate
* @since 8.1
*/
public static int getRelativeY(Element element, NativeEvent event) {
    int clientY = WidgetUtil.getTouchOrMouseClientY(event);
    return clientY - (element.getAbsoluteTop() - Window.getScrollTop());
}
/**
* Returns whether the given object is a string.
*
* @param obj
*            the object of which the type is examined
* @return {@code true} if the object is a string; {@code false} if not
* @since 8.2
*/
public static native boolean isString(Object obj)
/*-{
// Covers both JS string primitives and boxed String wrapper objects.
return typeof obj === 'string' || obj instanceof String;
}-*/;
/**
* Returns whether the given element is displayed.
* <p>
* This method returns false if either the given element or any of its
* ancestors has the style {@code display: none} applied.
*
* @param element
*            the element to test for visibility
* @return {@code true} if the element is displayed, {@code false} otherwise
* @since 8.3.2
*/
public static native boolean isDisplayed(Element element)
/*-{
// This measurement is borrowed from JQuery and measures the visible
// size of the element. The measurement should return false when either
// the element or any of its ancestors has "display: none" style.
// NOTE(review): elements hidden with visibility:hidden presumably still
// report offset sizes and therefore count as displayed here - confirm
// if callers need the distinction.
return !!(element.offsetWidth || element.offsetHeight
|| element.getClientRects().length);
}-*/;
/**
 * Utility methods for displaying error message on components.
 *
 * @since 8.2
 */
public static class ErrorUtil {

    /** Static helpers only; this class is not meant to be instantiated. */
    private ErrorUtil() {
    }

    /**
     * Sets the error level style name for the given element and removes all
     * previously applied error level style names. The style name has the
     * {@code prefix-errorLevel} format.
     *
     * @param element
     *            element to apply the style name to
     * @param prefix
     *            part of the style name before the error level string
     * @param errorLevel
     *            error level for which the style will be applied
     */
    public static void setErrorLevelStyle(Element element, String prefix,
            ErrorLevel errorLevel) {
        // Iterate over every level so stale class names from a previous
        // error level are always removed.
        for (ErrorLevel errorLevelValue : ErrorLevel.values()) {
            String className = prefix + "-"
                    + errorLevelValue.toString().toLowerCase(Locale.ROOT);
            if (errorLevel == errorLevelValue) {
                element.addClassName(className);
            } else {
                element.removeClassName(className);
            }
        }
    }

    /**
     * Creates an element to use by widgets as an error indicator.
     *
     * @return the error indicator element
     */
    public static Element createErrorIndicatorElement() {
        Element indicator = DOM.createSpan();
        indicator.setClassName(StyleConstants.STYLE_NAME_ERROR_INDICATOR);
        return indicator;
    }
}
/**
* Stops the browser from showing its own suggestion popup for the given
* text box.
* <p>
* A random (invalid) token is used instead of "off" where possible, as
* suggested by
* https://developer.mozilla.org/en-US/docs/Web/Security/Securing_your_site/Turning_off_form_autocompletion
* Chrome is the exception: it supports "off" and ignores random values.
* The non-standard Safari options autocapitalize and autocorrect are
* deliberately left untouched since they do not interfere in the same way
* and might be useful in a combo box where new items are allowed.
*
* @param textBox
*            the text box whose native autocompletion should be disabled
*/
public static void disableBrowserAutocomplete(TextBox textBox) {
    if (!BrowserInfo.get().isChrome()) {
        // Any invalid token disables the popup in non-Chrome browsers.
        textBox.getElement().setAttribute("autocomplete",
                Math.random() + "");
    } else {
        // Chrome honours "off" but does not accept random tokens.
        textBox.getElement().setAttribute("autocomplete", "off");
    }
}
}
| vaadin/framework | client/src/main/java/com/vaadin/client/WidgetUtil.java |
/**
* Source: https://github.com/hsch/blurhash-java
*
* Copyright (c) 2019 Hendrik Schnepel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
* NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.thoughtcrime.securesms.blurhash;
import androidx.annotation.Nullable;
/**
 * Encoder/decoder for the base-83 alphabet used by BlurHash strings.
 * <p>
 * The alphabet matches the reference BlurHash implementation; values are
 * written most-significant digit first.
 */
final class Base83 {

    /** Maximum accepted length of a BlurHash string. */
    private static final int MAX_LENGTH = 90;

    private static final char[] ALPHABET =
            "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz#$%*+,-.:;=?@[]^_{|}~".toCharArray();

    /** Returns the index of {@code key} in {@code a}, or -1 if absent. */
    private static int indexOf(char[] a, char key) {
        for (int i = 0; i < a.length; i++) {
            if (a[i] == key) {
                return i;
            }
        }
        return -1;
    }

    /**
     * Writes {@code value} as {@code length} base-83 digits into
     * {@code buffer}, starting at {@code offset}, most significant digit
     * first.
     */
    static void encode(long value, int length, char[] buffer, int offset) {
        // FIX: use a long divisor. With an int it overflows once length
        // exceeds 5 digits (83^5 > Integer.MAX_VALUE), producing garbage
        // digits even though the value parameter itself is a long.
        long exp = 1;
        for (int i = 1; i <= length; i++, exp *= 83) {
            int digit = (int) (value / exp % 83);
            buffer[offset + length - i] = ALPHABET[digit];
        }
    }

    /**
     * Decodes the base-83 digits of {@code value} in the range
     * {@code [fromInclusive, toExclusive)} into an int.
     * <p>
     * Characters outside the alphabet are not detected here (indexOf
     * returns -1 and silently corrupts the result); callers are expected
     * to check {@link #isValid(String)} first.
     */
    static int decode(String value, int fromInclusive, int toExclusive) {
        int result = 0;
        char[] chars = value.toCharArray();
        for (int i = fromInclusive; i < toExclusive; i++) {
            result = result * 83 + indexOf(ALPHABET, chars[i]);
        }
        return result;
    }

    /**
     * Returns whether {@code value} (which may be null) is a non-empty
     * string of at most {@link #MAX_LENGTH} characters drawn entirely from
     * the base-83 alphabet.
     */
    static boolean isValid(String value) {
        if (value == null) return false;
        final int length = value.length();
        if (length == 0 || length > MAX_LENGTH) return false;
        for (int i = 0; i < length; i++) {
            if (indexOf(ALPHABET, value.charAt(i)) == -1) return false;
        }
        return true;
    }

    /** Static utility class; not instantiable. */
    private Base83() {
    }
}
| signalapp/Signal-Android | app/src/main/java/org/thoughtcrime/securesms/blurhash/Base83.java |
/* This code is part of Freenet. It is distributed under the GNU General
* Public License, version 2 (or at your option any later version). See
* http://www.gnu.org/ for further details of the GPL. */
package freenet.pluginmanager;
import static java.util.concurrent.TimeUnit.MINUTES;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.Semaphore;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import org.tanukisoftware.wrapper.WrapperManager;
import freenet.client.HighLevelSimpleClient;
import freenet.clients.fcp.ClientPut;
import freenet.clients.http.PageMaker.THEME;
import freenet.clients.http.QueueToadlet;
import freenet.clients.http.Toadlet;
import freenet.config.InvalidConfigValueException;
import freenet.config.NodeNeedRestartException;
import freenet.config.SubConfig;
import freenet.crypt.SHA256;
import freenet.keys.FreenetURI;
import freenet.l10n.BaseL10n.LANGUAGE;
import freenet.l10n.NodeL10n;
import freenet.node.Node;
import freenet.node.NodeClientCore;
import freenet.node.RequestClient;
import freenet.node.RequestClientBuilder;
import freenet.node.RequestStarter;
import freenet.node.useralerts.AbstractUserAlert;
import freenet.node.useralerts.UserAlert;
import freenet.pluginmanager.OfficialPlugins.OfficialPluginDescription;
import freenet.pluginmanager.PluginManager.PluginProgress.ProgressState;
import freenet.support.HTMLNode;
import freenet.support.HexUtil;
import freenet.support.JarClassLoader;
import freenet.support.Logger;
import freenet.support.Logger.LogLevel;
import freenet.support.SerialExecutor;
import freenet.support.Ticker;
import freenet.support.api.BooleanCallback;
import freenet.support.api.HTTPRequest;
import freenet.support.api.StringArrCallback;
import freenet.support.io.Closer;
import freenet.support.io.FileUtil;
import freenet.support.io.NativeThread.PriorityLevel;
public class PluginManager {
private final HashMap<String, FredPlugin> toadletList = new HashMap<String, FredPlugin>();
/* All currently starting plugins. */
private final OfficialPlugins officialPlugins = new OfficialPlugins();
private final LoadedPlugins loadedPlugins = new LoadedPlugins();
final Node node;
private final NodeClientCore core;
private boolean logMINOR;
private boolean logDEBUG;
private final HighLevelSimpleClient client;
private static PluginManager selfinstance = null;
private THEME fproxyTheme;
private final SerialExecutor executor;
private boolean alwaysLoadOfficialPluginsFromCentralServer = false;
static final short PRIO = RequestStarter.INTERACTIVE_PRIORITY_CLASS;
/** Is the plugin system enabled? Set at boot time only. Mainly for simulations. */
private final boolean enabled;
/**
* Creates the plugin manager: sets up the plugin download client and the
* serialized callback executor, registers the "pluginmanager"
* configuration options, and applies back-compatibility migrations to the
* configured plugin list.
*
* @param node
*            the node this manager belongs to
* @param lastVersion
*            the node version this config was last run with; used to
*            decide which plugin-list migrations to apply
*/
public PluginManager(Node node, int lastVersion) {
logMINOR = Logger.shouldLog(LogLevel.MINOR, this);
logDEBUG = Logger.shouldLog(LogLevel.DEBUG, this);
// config
this.node = node;
this.core = node.clientCore;
if(logMINOR)
Logger.minor(this, "Starting Plugin Manager");
if(logDEBUG)
Logger.debug(this, "Initialize Plugin Manager config");
client = core.makeClient(PRIO, true, false);
// callback executor
executor = new SerialExecutor(PriorityLevel.NORM_PRIORITY.value);
executor.start(node.executor, "PM callback executor");
SubConfig pmconfig = node.config.createSubConfig("pluginmanager");
// "enabled" can only change across restarts; setting it at runtime
// deliberately throws NodeNeedRestartException.
pmconfig.register("enabled", true, 0, true, true, "PluginManager.enabled", "PluginManager.enabledLong", new BooleanCallback() {
@Override
public synchronized Boolean get() {
return enabled;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException,
NodeNeedRestartException {
if(enabled != val)
throw new NodeNeedRestartException(l10n("changePluginManagerEnabledInConfig"));
}
});
enabled = pmconfig.getBoolean("enabled");
// Start plugins in the config
pmconfig.register("loadplugin", null, 0, true, false, "PluginManager.loadedOnStartup", "PluginManager.loadedOnStartupLong",
new StringArrCallback() {
@Override
public String[] get() {
return getConfigLoadString();
}
@Override
public void set(String[] val) throws InvalidConfigValueException {
//if(storeDir.equals(new File(val))) return;
// FIXME
throw new InvalidConfigValueException(NodeL10n.getBase().getString("PluginManager.cannotSetOnceLoaded"));
}
@Override
public boolean isReadOnly() {
return true;
}
});
toStart = pmconfig.getStringArr("loadplugin");
// Migration: pre-1237 configs listing XMLLibrarian also get Library.
if(lastVersion < 1237 && contains(toStart, "XMLLibrarian") && !contains(toStart, "Library")) {
toStart = Arrays.copyOf(toStart, toStart.length+1);
toStart[toStart.length-1] = "Library";
System.err.println("Loading Library plugin, replaces XMLLibrarian, when upgrading from pre-1237");
}
// Migration: the KeyExplorer plugin was renamed to KeyUtils.
if(contains(toStart, "KeyExplorer")) {
for(int i=0;i<toStart.length;i++) {
if("KeyExplorer".equals(toStart[i]))
toStart[i] = "KeyUtils";
}
System.err.println("KeyExplorer plugin renamed to KeyUtils");
}
// This should default to false. Even though we use SSL, a wiretapper may be able to tell which
// plugin is being loaded, and correlate that with identity creation; plus of course they can see
// that somebody is using Freenet.
pmconfig.register("alwaysLoadOfficialPluginsFromCentralServer", false, 0, false, false, "PluginManager.alwaysLoadPluginsFromCentralServer", "PluginManager.alwaysLoadPluginsFromCentralServerLong", new BooleanCallback() {
@Override
public Boolean get() {
return alwaysLoadOfficialPluginsFromCentralServer;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
alwaysLoadOfficialPluginsFromCentralServer = val;
}
});
alwaysLoadOfficialPluginsFromCentralServer = pmconfig.getBoolean("alwaysLoadOfficialPluginsFromCentralServer");
if (lastVersion <= 1437) {
// Overwrite this setting, since it will have been set by the old callback and then written as it's not default.
// FIXME remove back compatibility code.
alwaysLoadOfficialPluginsFromCentralServer = false;
}
pmconfig.finishedInitialization();
fproxyTheme = THEME.themeFromName(node.config.get("fproxy").getString("css"));
selfinstance = this;
}
/**
* Returns whether {@code string} occurs in {@code array}.
*
* @param array
*            the array to search
* @param string
*            the value to look for (must not be null)
* @return true if at least one element equals the given string
*/
private boolean contains(String[] array, String string) {
    for (int i = 0; i < array.length; i++) {
        if (string.equals(array[i])) {
            return true;
        }
    }
    return false;
}
private boolean started;
private boolean stopping;
private String[] toStart;
/**
* Starts all plugins listed in the configuration, each asynchronously on
* the node executor, then marks the manager as started once every
* configured plugin has finished its load attempt. Does nothing when the
* plugin system is disabled or start() already ran.
*/
public void start() {
if (!enabled) return;
synchronized (loadedPlugins) {
if (started) {
return;
}
}
// One permit is released per configured plugin when its load attempt
// completes (whether or not it succeeded).
final Semaphore startingPlugins = new Semaphore(0);
for(final String name : toStart) {
core.getExecutor().execute(new Runnable() {
@Override
public void run() {
startPluginAuto(name, false);
startingPlugins.release();
}
});
}
core.getExecutor().execute(new Runnable() {
@Override
public void run() {
// Block until every configured plugin has been attempted, then
// flip the started flag and drop the startup list.
startingPlugins.acquireUninterruptibly(toStart.length);
synchronized (loadedPlugins) {
started = true;
toStart = null;
}
}
});
}
/**
 * Stops all starting and loaded plugins, waiting up to
 * {@code maxWaitTime} milliseconds for them to shut down before giving
 * up.
 * <p>
 * FIX: the previous implementation read the clock once before its
 * {@code while(true)} loop, so {@code delta} never changed; on timeout
 * the {@code delta <= 0} branch had no return, which degenerated into an
 * infinite log-spamming loop. The clock is now re-read every iteration
 * and the method returns once the deadline has passed.
 *
 * @param maxWaitTime
 *            maximum total time in milliseconds to wait for plugins to
 *            finish shutting down
 */
public void stop(long maxWaitTime) {
    if (!enabled) return;
    // Stop loading plugins.
    synchronized (loadedPlugins) {
        stopping = true;
    }
    for (PluginProgress progress : loadedPlugins.getStartingPlugins()) {
        progress.kill();
    }
    // Stop already loaded plugins.
    for (PluginInfoWrapper pi : loadedPlugins.getLoadedPlugins()) {
        pi.startShutdownPlugin(this, false);
    }
    long deadline = System.currentTimeMillis() + maxWaitTime;
    while (true) {
        // Re-read the clock each pass so the remaining budget shrinks.
        int delta = (int) (deadline - System.currentTimeMillis());
        if (delta <= 0) {
            String list = pluginList(loadedPlugins.getLoadedPlugins());
            Logger.error(this, "Plugins still shutting down at timeout:\n" + list);
            System.err.println("Plugins still shutting down at timeout:\n" + list);
            // Give up rather than loop forever.
            return;
        }
        for (PluginInfoWrapper pluginInfoWrapper : loadedPlugins.getLoadedPlugins()) {
            System.out.println("Waiting for plugin to finish shutting down: " + pluginInfoWrapper.getFilename());
            if (pluginInfoWrapper.finishShutdownPlugin(this, delta, false)) {
                loadedPlugins.removeLoadedPlugin(pluginInfoWrapper);
            }
        }
        if (!loadedPlugins.hasLoadedPlugins()) {
            Logger.normal(this, "All plugins unloaded");
            System.out.println("All plugins unloaded");
            return;
        }
        String list = pluginList(loadedPlugins.getLoadedPlugins());
        Logger.error(this, "Plugins still shutting down:\n" + list);
        System.err.println("Plugins still shutting down:\n" + list);
    }
}
/**
 * Builds a newline-separated list of the filenames of the given plugins,
 * for logging.
 *
 * @param wrappers
 *            the plugins to list
 * @return one filename per line, each terminated by '\n'
 */
private static String pluginList(Collection<PluginInfoWrapper> wrappers) {
    // StringBuilder instead of StringBuffer: the buffer is method-local,
    // so StringBuffer's per-call synchronization is pure overhead.
    StringBuilder sb = new StringBuilder();
    for (PluginInfoWrapper pi : wrappers) {
        sb.append(pi.getFilename());
        sb.append('\n');
    }
    return sb.toString();
}
/**
* Returns the plugin names to persist in the "loadplugin" config entry:
* the original startup list until loading has finished, afterwards the
* currently loaded plugins plus those that failed to load.
*/
private String[] getConfigLoadString() {
    synchronized (loadedPlugins) {
        if (!started) {
            return toStart;
        }
    }
    List<String> names = new ArrayList<String>();
    for (PluginInfoWrapper pi : loadedPlugins.getLoadedPlugins()) {
        names.add(pi.getFilename());
    }
    names.addAll(loadedPlugins.getFailedPluginNames());
    return names.toArray(new String[names.size()]);
}
/**
* Returns a snapshot of all currently starting plugins.
*
* @return a new set containing every plugin currently starting
*/
public Set<PluginProgress> getStartingPlugins() {
    Set<PluginProgress> snapshot = new HashSet<PluginProgress>();
    snapshot.addAll(loadedPlugins.getStartingPlugins());
    return snapshot;
}
// try to guess around...
/**
* Starts a plugin, guessing how to fetch it from the name alone: an
* official plugin name, a Freenet URI, a path to an existing local file,
* or - as a last resort - a URL.
*
* @param pluginname
*            official plugin name, Freenet key, file path or URL
* @param store
*            whether to persist the updated plugin list to the config
* @return the wrapper of the started plugin, or null on failure
*/
public PluginInfoWrapper startPluginAuto(final String pluginname, boolean store) {
OfficialPluginDescription desc;
if((desc = isOfficialPlugin(pluginname)) != null) {
return startPluginOfficial(pluginname, store, desc, false, false);
}
try {
new FreenetURI(pluginname); // test for MalformedURLException
return startPluginFreenet(pluginname, store);
} catch(MalformedURLException e) {
// not a freenet key
}
File[] roots = File.listRoots();
for(File f : roots) {
// NOTE(review): File.getName() is typically the empty string for
// filesystem roots, which would make startsWith() match any name -
// confirm this prefix check behaves as intended on all platforms.
if(pluginname.startsWith(f.getName()) && new File(pluginname).exists()) {
return startPluginFile(pluginname, store);
}
}
return startPluginURL(pluginname, store);
}
/**
* Starts an official plugin by name, looking up its description in the
* official plugin registry and delegating to the full overload.
*/
public PluginInfoWrapper startPluginOfficial(final String pluginname, boolean store, boolean force, boolean forceHTTPS) {
    OfficialPluginDescription description = officialPlugins.get(pluginname);
    return startPluginOfficial(pluginname, store, description, force, forceHTTPS);
}
/**
* Starts an official plugin, choosing the download transport first.
* HTTPS (central server) is used when the node is configured to always
* fetch from the central server and the caller did not override that, or
* when the caller explicitly forces HTTPS; otherwise the plugin is
* fetched over Freenet.
*/
public PluginInfoWrapper startPluginOfficial(final String pluginname, boolean store, OfficialPluginDescription desc, boolean force, boolean forceHTTPS) {
    // Same truth table as the original inline condition, spelled out.
    boolean useHTTPS = (alwaysLoadOfficialPluginsFromCentralServer && !force)
            || (force && forceHTTPS);
    PluginDownLoader<?> loader;
    if (useHTTPS) {
        loader = new PluginDownLoaderOfficialHTTPS();
    } else {
        loader = new PluginDownLoaderOfficialFreenet(client, node, false);
    }
    return realStartPlugin(loader, pluginname, store, desc.alwaysFetchLatestVersion);
}
/** Starts a plugin loaded from a file on the local disk. */
public PluginInfoWrapper startPluginFile(final String filename, boolean store) {
    PluginDownLoaderFile loader = new PluginDownLoaderFile();
    return realStartPlugin(loader, filename, store, false);
}
/** Starts a plugin fetched from an arbitrary URL. */
public PluginInfoWrapper startPluginURL(final String filename, boolean store) {
    PluginDownLoaderURL loader = new PluginDownLoaderURL();
    return realStartPlugin(loader, filename, store, false);
}
/** Starts a plugin fetched from a Freenet key. */
public PluginInfoWrapper startPluginFreenet(final String filename, boolean store) {
    PluginDownLoaderFreenet loader = new PluginDownLoaderFreenet(client, node, false);
    return realStartPlugin(loader, filename, store, false);
}
/**
* Downloads, loads and starts a single plugin using the given downloader,
* registering progress, success and failure state. On a non-fatal
* Freenet download failure a retry is queued; on any failure a user
* alert is shown (replacing any previous alert for the same plugin).
*
* @param pdl
*            the downloader that knows how to fetch this plugin
* @param filename
*            the plugin name/key/path as given by the caller
* @param store
*            whether to persist the plugin list to the config afterwards
* @param alwaysDownload
*            whether to force a fresh download instead of any cached copy
* @return the started plugin's wrapper, or null when the name is blank,
*         the plugin was already loaded, or loading failed
*/
private PluginInfoWrapper realStartPlugin(final PluginDownLoader<?> pdl, final String filename, final boolean store, boolean alwaysDownload) {
if (!enabled) throw new IllegalStateException("Plugins disabled");
if(filename.trim().length() == 0)
return null;
final PluginProgress pluginProgress = new PluginProgress(filename, pdl);
loadedPlugins.addStartingPlugin(pluginProgress);
Logger.normal(this, "Loading plugin: " + filename);
FredPlugin plug;
PluginInfoWrapper pi = null;
try {
plug = loadPlugin(pdl, filename, pluginProgress, alwaysDownload);
pluginProgress.setProgress(ProgressState.STARTING);
pi = new PluginInfoWrapper(node, plug, filename, pdl.isOfficialPluginLoader());
PluginHandler.startPlugin(PluginManager.this, pi);
loadedPlugins.addLoadedPlugin(pi);
// A successful load clears any previous failure record.
loadedPlugins.removeFailedPlugin(filename);
Logger.normal(this, "Plugin loaded: " + filename);
} catch (PluginAlreadyLoaded e) {
return null;
} catch (PluginNotFoundException e) {
Logger.normal(this, "Loading plugin failed (" + filename + ')', e);
boolean stillTrying = false;
if (pdl.isLoadingFromFreenet()) {
PluginDownLoaderFreenet downloader = (PluginDownLoaderFreenet) pdl;
// Only retry when the failure is transient and no second load
// attempt for this plugin is already in flight.
if (!downloader.fatalFailure() && !downloader.desperate && !twoCopiesInStartingPlugins(filename)) {
// Retry forever...
final PluginDownLoader<?> retry = pdl.getRetryDownloader();
stillTrying = true;
node.getTicker().queueTimedJob(new Runnable() {
@Override
public void run() {
realStartPlugin(retry, filename, store, true);
}
}, 0);
}
}
PluginLoadFailedUserAlert newAlert =
new PluginLoadFailedUserAlert(filename, pdl.isOfficialPluginLoader(), pdl.isOfficialPluginLoader() && pdl.isLoadingFromFreenet(), stillTrying, e);
PluginLoadFailedUserAlert oldAlert = loadedPlugins.replaceUserAlert(filename, newAlert);
core.alerts.register(newAlert);
core.alerts.unregister(oldAlert);
} catch (UnsupportedClassVersionError e) {
// The plugin jar was compiled for a newer JVM than ours.
Logger.error(this, "Could not load plugin " + filename + " : " + e,
e);
System.err.println("Could not load plugin " + filename + " : " + e);
e.printStackTrace();
System.err.println("Plugin " + filename + " appears to require a later JVM");
Logger.error(this, "Plugin " + filename + " appears to require a later JVM");
PluginLoadFailedUserAlert newAlert =
new PluginLoadFailedUserAlert(filename, pdl.isOfficialPluginLoader(), pdl.isOfficialPluginLoader() && pdl.isLoadingFromFreenet(), false, l10n("pluginReqNewerJVMTitle", "name", filename));
PluginLoadFailedUserAlert oldAlert = loadedPlugins.replaceUserAlert(filename, newAlert);
core.alerts.register(newAlert);
core.alerts.unregister(oldAlert);
} catch (Throwable e) {
Logger.error(this, "Could not load plugin " + filename + " : " + e, e);
System.err.println("Could not load plugin " + filename + " : " + e);
e.printStackTrace();
System.err.println("Plugin "+filename+" is broken, but we want to retry after next startup");
Logger.error(this, "Plugin "+filename+" is broken, but we want to retry after next startup");
PluginLoadFailedUserAlert newAlert =
new PluginLoadFailedUserAlert(filename, pdl.isOfficialPluginLoader(), pdl.isOfficialPluginLoader() && pdl.isLoadingFromFreenet(), false, e);
PluginLoadFailedUserAlert oldAlert = loadedPlugins.replaceUserAlert(filename, newAlert);
core.alerts.register(newAlert);
core.alerts.unregister(oldAlert);
} finally {
loadedPlugins.removeStartingPlugin(pluginProgress);
}
/* try not to destroy the config. */
synchronized(this) {
if (store)
core.storeConfig();
}
if(pi != null)
node.nodeUpdater.startPluginUpdater(filename);
return pi;
}
/**
* Returns true when at least two load attempts for the given filename are
* already in the starting set (i.e. a retry is already in flight).
*/
private synchronized boolean twoCopiesInStartingPlugins(String filename) {
    int matches = 0;
    for (PluginProgress progress : loadedPlugins.getStartingPlugins()) {
        if (filename.equals(progress.name)) {
            matches++;
            if (matches >= 2) {
                return true;
            }
        }
    }
    return false;
}
/**
* User alert shown when a plugin fails to load. Records where the plugin
* came from (official / Freenet), whether a retry is still running, and
* the failure message or stack trace; renders retry buttons for official
* plugins. Dismissing the alert deletes the failure record and cancels
* any remaining load attempts.
*/
class PluginLoadFailedUserAlert extends AbstractUserAlert {
// The plugin name/key/path as originally requested.
final String filename;
// Human-readable failure description.
final String message;
// Stack trace of the failure, or null when the cause was a plain
// PluginNotFoundException (its message suffices).
final StackTraceElement[] stacktrace;
final boolean official;
final boolean officialFromFreenet;
final boolean stillTryingOverFreenet;
public PluginLoadFailedUserAlert(String filename, boolean official, boolean officialFromFreenet, boolean stillTryingOverFreenet, String message) {
this.filename = filename;
this.official = official;
this.message = message;
this.stacktrace = null;
this.officialFromFreenet = officialFromFreenet;
this.stillTryingOverFreenet = stillTryingOverFreenet;
}
public PluginLoadFailedUserAlert(String filename, boolean official, boolean officialFromFreenet, boolean stillTryingOverFreenet, Throwable e) {
this.filename = filename;
this.official = official;
this.stillTryingOverFreenet = stillTryingOverFreenet;
String msg;
if(e instanceof PluginNotFoundException) {
msg = e.getMessage();
stacktrace = null;
} else {
// If it's something weird, we need to know what it is.
msg = e.getClass() + ": " + e.getMessage();
stacktrace = e.getStackTrace();
}
if(msg == null) msg = e.toString();
this.message = msg;
this.officialFromFreenet = officialFromFreenet;
}
@Override
public String dismissButtonText() {
return l10n("deleteFailedPluginButton");
}
@Override
public void onDismiss() {
// Forget the failure and cancel any load attempts still running.
loadedPlugins.removeFailedPlugin(filename);
node.executor.execute(new Runnable() {
@Override
public void run() {
cancelRunningLoads(filename, null);
}
});
}
@Override
public String anchor() {
return "pluginfailed:"+filename;
}
@Override
public HTMLNode getHTMLText() {
HTMLNode div = new HTMLNode("div");
HTMLNode p = div.addChild("p");
p.addChild("#", l10n("pluginLoadingFailedWithMessage", new String[] { "name", "message" }, new String[] { filename, message }));
// Render the stack trace, one indented line per frame, if we have one.
if(stacktrace != null) {
for(StackTraceElement e : stacktrace) {
p.addChild("br");
p.addChild("%", "&nbsp; &nbsp; &nbsp; &nbsp;");
p.addChild("#", "at " + e);
}
}
if(stillTryingOverFreenet) {
div.addChild("p", l10n("pluginLoadingFailedStillTryingOverFreenet"));
}
// For official plugins, offer retry buttons over the alternative
// transport(s).
if(official) {
p = div.addChild("p");
if(officialFromFreenet)
p.addChild("#", l10n("officialPluginLoadFailedSuggestTryAgainFreenet"));
else
p.addChild("#", l10n("officialPluginLoadFailedSuggestTryAgainHTTPS"));
HTMLNode reloadForm = div.addChild("form", new String[] { "action", "method" }, new String[] { "/plugins/", "post" });
reloadForm.addChild("input", new String[] { "type", "name", "value" }, new String[] { "hidden", "formPassword", node.clientCore.formPassword });
reloadForm.addChild("input", new String[] { "type", "name", "value" }, new String[] { "hidden", "plugin-name", filename });
reloadForm.addChild("input", new String[] { "type", "name", "value" }, new String[] { "hidden", "pluginSource", "https" });
reloadForm.addChild("input", new String[] { "type", "name", "value" }, new String[] { "submit", "submit-official", l10n("officialPluginLoadFailedTryAgain") });
if(!stillTryingOverFreenet) {
reloadForm = div.addChild("form", new String[] { "action", "method" }, new String[] { "/plugins/", "post" });
reloadForm.addChild("input", new String[] { "type", "name", "value" }, new String[] { "hidden", "formPassword", node.clientCore.formPassword });
reloadForm.addChild("input", new String[] { "type", "name", "value" }, new String[] { "hidden", "plugin-name", filename });
reloadForm.addChild("input", new String[] { "type", "name", "value" }, new String[] { "hidden", "pluginSource", "freenet" });
reloadForm.addChild("input", new String[] { "type", "name", "value" }, new String[] { "submit", "submit-official", l10n("officialPluginLoadFailedTryAgainFreenet") });
}
}
return div;
}
@Override
public short getPriorityClass() {
return UserAlert.ERROR;
}
@Override
public String getShortText() {
return l10n("pluginLoadingFailedShort", "name", filename);
}
@Override
public String getText() {
return l10n("pluginLoadingFailedWithMessage", new String[] { "name", "message" }, new String[] { filename, message });
}
@Override
public String getTitle() {
return l10n("pluginLoadingFailedTitle");
}
@Override
public boolean isEventNotification() {
return false;
}
@Override
public boolean isValid() {
// The alert stays visible only while the failure record exists;
// once it is gone, unregister ourselves.
boolean success = loadedPlugins.isFailedPlugin(filename);
if(!success) {
core.alerts.unregister(this);
}
return success;
}
@Override
public void isValid(boolean validity) {
}
@Override
public boolean shouldUnregisterOnDismiss() {
return true;
}
@Override
public boolean userCanDismiss() {
return true;
}
}
/**
* Registers a freshly started plugin with the node subsystems matching
* the interfaces it implements: FProxy toadlets, a config page, IP
* detection, port forwarding and bandwidth indication.
*
* @param pi
*            the wrapper of the plugin to register
*/
void register(PluginInfoWrapper pi) {
FredPlugin plug = pi.getPlugin();
// handles FProxy? If so, register
if(pi.isPproxyPlugin())
registerToadlet(plug);
if(pi.isConfigurablePlugin()) {
// Registering the toadlet with atFront=false means that
// the node's ConfigToadlet will clobber the plugin's
// ConfigToadlet and the page will not be visible. So it
// must be registered with atFront=true. This means that
// malicious plugins could try to hijack node config
// pages, to ill effect. Let's avoid that.
boolean pluginIsTryingToHijackNodeConfig = false;
for(SubConfig subconfig : node.config.getConfigs()) {
if(pi.getPluginClassName().equals(subconfig.getPrefix())) {
pluginIsTryingToHijackNodeConfig = true;
break;
}
}
if(pluginIsTryingToHijackNodeConfig) {
Logger.warning(this, "The plugin loaded from "+pi.getFilename()+" is attempting to hijack a node configuration page; refusing to register its ConfigToadlet");
} else {
Toadlet toadlet = pi.getConfigToadlet();
core.getToadletContainer().register(toadlet, "FProxyToadlet.categoryConfig", toadlet.path(), true, "ConfigToadlet."+pi.getPluginClassName()+".label", "ConfigToadlet."+pi.getPluginClassName()+".tooltip", true, null, (FredPluginL10n)pi.getPlugin());
}
}
// Optional capability interfaces: register each one the plugin exposes.
if(pi.isIPDetectorPlugin())
node.ipDetector.registerIPDetectorPlugin((FredPluginIPDetector) plug);
if(pi.isPortForwardPlugin())
node.ipDetector.registerPortForwardPlugin((FredPluginPortForward) plug);
if(pi.isBandwidthIndicator())
node.ipDetector.registerBandwidthIndicatorPlugin((FredPluginBandwidthIndicator) plug);
}
/**
 * Cancels every in-flight load of the named plugin, except for the one
 * identified by {@code exceptFor} (typically the load that is asking).
 *
 * @param filename The plugin name whose loads should be cancelled
 * @param exceptFor A load to spare, or null to cancel all of them
 */
public void cancelRunningLoads(String filename, PluginProgress exceptFor) {
	Logger.normal(this, "Cancelling loads for plugin "+filename);
	// Iterate over a snapshot so entries can be removed while walking.
	List<PluginProgress> snapshot = new ArrayList<PluginProgress>(loadedPlugins.getStartingPlugins());
	for (PluginProgress progress : snapshot) {
		if (progress == exceptFor) {
			continue;
		}
		if (!filename.equals(progress.name)) {
			continue;
		}
		progress.kill();
		loadedPlugins.removeStartingPlugin(progress);
	}
}
/**
 * Returns the translation of the given key, prefixed by the short name of
 * the current class.
 *
 * @param key
 *            The key to fetch
 * @return The translation
 */
static String l10n(String key) {
	return NodeL10n.getBase().getString("PluginManager." + key);
}

/**
 * Returns the translation of the given key, replacing each occurrence of
 * <code>${<em>pattern</em>}</code> with <code>value</code>.
 *
 * @param key The key to fetch
 * @param pattern The pattern to replace
 * @param value The value to substitute
 * @return The translation
 */
private static String l10n(String key, String pattern, String value) {
	return NodeL10n.getBase().getString("PluginManager." + key, pattern, value);
}

/**
 * Returns the translation of the given key, replacing each occurrence of
 * <code>${<em>pattern</em>}</code> with <code>value</code>.
 *
 * @param key
 *            The key to fetch
 * @param patterns
 *            The patterns to replace
 * @param values
 *            The values to substitute
 * @return The translation
 */
// NOTE(review): unlike the other overloads this one is an instance method,
// although it touches no instance state — could be static; confirm.
private String l10n(String key, String[] patterns, String[] values) {
	return NodeL10n.getBase().getString("PluginManager." + key, patterns, values);
}
/**
 * Registers a plugin as an HTTP handler. Handlers are keyed by the
 * plugin's main class name; requests to /plugins/&lt;classname&gt;/ are
 * routed to it.
 *
 * @param pl The plugin to register
 */
private void registerToadlet(FredPlugin pl) {
	String handlerKey = pl.getClass().getName();
	synchronized (toadletList) {
		toadletList.put(handlerKey, pl);
	}
	Logger.normal(this, "Added HTTP handler for /plugins/" + handlerKey + '/');
}
/**
 * Remove a plugin from the plugin list.
 *
 * While the node is shutting down ({@code stopping}) plugins are removed
 * unconditionally; otherwise only plugins actually present in the loaded
 * set are removed. The updated list is persisted to the config.
 */
public void removePlugin(PluginInfoWrapper pi) {
	synchronized (loadedPlugins) {
		// Not stopping and not loaded: nothing to do.
		if (!stopping && !loadedPlugins.hasLoadedPlugin(pi)) {
			return;
		}
	}
	loadedPlugins.removeLoadedPlugin(pi);
	// Persist so the removal survives a node restart.
	core.storeConfig();
}
/**
 * Removes the cached copy of the given plugin from the plugins/ directory.
 *
 * @param pluginSpecification
 *            The plugin specification: a bare name, a filename, or a
 *            URL/path whose last component is the filename
 */
public void removeCachedCopy(String pluginSpecification) {
	if(pluginSpecification == null) {
		// Will be null if the file for a given plugin can't be found, eg. if it has already been
		// removed. Ignore it since the file isn't there anyway
		Logger.warning(this, "Can't remove null from cache. Ignoring");
		return;
	}
	int lastSlash = pluginSpecification.lastIndexOf('/');
	String pluginFilename;
	if(lastSlash == -1)
		/* Windows, maybe? */
		lastSlash = pluginSpecification.lastIndexOf('\\');
	File pluginDirectory = node.getPluginDir();
	if(lastSlash == -1) {
		/* it's an official plugin or filename without path */
		// Normalise to a ".jar" filename.
		if (pluginSpecification.toLowerCase().endsWith(".jar"))
			pluginFilename = pluginSpecification;
		else
			pluginFilename = pluginSpecification + ".jar";
	} else
		pluginFilename = pluginSpecification.substring(lastSlash + 1);
	// NOTE(review): guarded by logDEBUG but logs at MINOR level — possibly
	// meant to be Logger.debug; confirm before changing.
	if(logDEBUG)
		Logger.minor(this, "Delete plugin - plugname: " + pluginSpecification + " filename: " + pluginFilename, new Exception("debug"));
	// Delete every timestamped cached instance of this plugin JAR.
	List<File> cachedFiles = getPreviousInstances(pluginDirectory, pluginFilename);
	for (File cachedFile : cachedFiles) {
		if (!cachedFile.delete())
			if(logMINOR) Logger.minor(this, "Can't delete file " + cachedFile);
	}
}
/**
 * Removes the HTTP handler registered for the given plugin, if any.
 *
 * @param pi The wrapper of the plugin being unregistered
 */
public void unregisterPluginToadlet(PluginInfoWrapper pi) {
	String className = pi.getPluginClassName();
	synchronized (toadletList) {
		try {
			toadletList.remove(className);
			Logger.normal(this, "Removed HTTP handler for /plugins/" +
					className + '/', new Exception("debug"));
		} catch (Throwable ex) {
			// Defensive: never let a broken plugin break unregistration.
			Logger.error(this, "removing Plugin", ex);
		}
	}
}
/**
 * @deprecated will be removed in version 1473.
 */
// NOTE(review): despite its name this method *removes* the symlink targets
// from the toadlet list — it mirrors removeToadletSymlinks() minus the
// per-target bookkeeping; confirm before relying on the name.
@Deprecated
public void addToadletSymlinks(PluginInfoWrapper pi) {
	synchronized(toadletList) {
		try {
			String targets[] = pi.getPluginToadletSymlinks();
			if(targets == null)
				return;
			for(String target: targets) {
				toadletList.remove(target);
				Logger.normal(this, "Removed HTTP symlink: " + target +
					" => /plugins/" + pi.getPluginClassName() + '/');
			}
		} catch(Throwable ex) {
			// Defensive catch-all: a broken plugin must not break unregistration.
			Logger.error(this, "removing Toadlet-link", ex);
		}
	}
}
/**
 * Removes every HTTP symlink the plugin registered, updating both the
 * toadlet list and the plugin's own symlink bookkeeping.
 *
 * @deprecated will be removed in version 1473.
 */
@Deprecated
public void removeToadletSymlinks(PluginInfoWrapper pi) {
	synchronized(toadletList) {
		// Remembers the last target attempted, for the error log below.
		String rm = null;
		try {
			String targets[] = pi.getPluginToadletSymlinks();
			if(targets == null)
				return;
			for(String target: targets) {
				rm = target;
				toadletList.remove(target);
				pi.removePluginToadletSymlink(target);
				Logger.normal(this, "Removed HTTP symlink: " + target +
					" => /plugins/" + pi.getPluginClassName() + '/');
			}
		} catch(Throwable ex) {
			// Defensive catch-all: report which target failed and carry on.
			Logger.error(this, "removing Toadlet-link: " + rm, ex);
		}
	}
}
/**
 * Renders every loaded plugin's wrapper description, one per line.
 *
 * @return A newline-separated dump of all loaded plugins
 */
public String dumpPlugins() {
	StringBuilder dump = new StringBuilder();
	for (PluginInfoWrapper wrapper : loadedPlugins.getLoadedPlugins()) {
		dump.append(wrapper.toString()).append('\n');
	}
	return dump.toString();
}
/**
 * Returns a sorted snapshot of all currently loaded plugins. The returned
 * set is a copy; mutating it does not affect the plugin manager.
 */
public Set<PluginInfoWrapper> getPlugins() {
	return new TreeSet<PluginInfoWrapper>(loadedPlugins.getLoadedPlugins());
}
/**
 * Look for PluginInfo for a Plugin with given classname or filename.
 *
 * @return the PluginInfo or null if not found
 * @deprecated
 *     This function was deprecated because the "or filename" part of the function specification
 *     was NOT documented before it was deprecated. Thus it is possible that legacy callers of
 *     the function did wrongly expect or not expect that. When removing this function, please
 *     review the callers for correctness with regards to that.<br>
 *     You might replace usage of this function with
 *     {@link #getPluginInfoByClassName(String)}.
 */
@Deprecated
public PluginInfoWrapper getPluginInfo(String plugname) {
	for (PluginInfoWrapper wrapper : loadedPlugins.getLoadedPlugins()) {
		boolean matchesClassName = wrapper.getPluginClassName().equals(plugname);
		boolean matchesFilename = wrapper.getFilename().equals(plugname);
		if (matchesClassName || matchesFilename) {
			return wrapper;
		}
	}
	return null;
}
/**
 * @param pluginClassName
 *            The name of the main class of the plugin - that is the class which implements
 *            {@link FredPlugin}.
 * @return
 *            The {@link PluginInfoWrapper} for the plugin with the given class name, or null if no
 *            matching plugin was found.
 */
public PluginInfoWrapper getPluginInfoByClassName(String pluginClassName) {
	for (PluginInfoWrapper wrapper : loadedPlugins.getLoadedPlugins()) {
		if (!wrapper.getPluginClassName().equals(pluginClassName)) {
			continue;
		}
		return wrapper;
	}
	return null;
}
/**
 * look for a FCPPlugin with given classname
 * @param plugname
 * @return the plugin or null if not found (or if it is currently stopping)
 * @deprecated
 *     The {@link FredPluginFCP} API, which this returns, was deprecated to be replaced by
 *     {@link FredPluginFCPMessageHandler.ServerSideFCPMessageHandler}. Plugin authors should
 *     implement the new interface instead of the old, and this codepath to support plugins
 *     which implement the old interface should be removed one day. No new code will be needed
 *     then: The code to use the new interface already exists in its own codepath - the
 *     equivalent function for the new API is {link #getPluginFCPServer(String)}, and it is
 *     already being used automatically for plugins which implement it.
 */
@Deprecated
public FredPluginFCP getFCPPlugin(String plugname) {
	for (PluginInfoWrapper pluginInfoWrapper : loadedPlugins.getLoadedPlugins()) {
		// Match on class name; skip plugins that are shutting down.
		if (pluginInfoWrapper.isFCPPlugin() && pluginInfoWrapper.getPluginClassName().equals(plugname) && !pluginInfoWrapper.isStopping()) {
			return (FredPluginFCP) pluginInfoWrapper.plug;
		}
	}
	return null;
}
/**
 * Get the {@link FredPluginFCPMessageHandler.ServerSideFCPMessageHandler} of the plugin with
 * the given class name.
 *
 * @param pluginClassName
 *            See {@link #getPluginInfoByClassName(String)}.
 * @throws PluginNotFoundException
 *             If the specified plugin is not loaded or does not provide an FCP server.
 */
public FredPluginFCPMessageHandler.ServerSideFCPMessageHandler
		getPluginFCPServer(String pluginClassName)
		throws PluginNotFoundException {
	PluginInfoWrapper info = getPluginInfoByClassName(pluginClassName);
	if (info == null || !info.isFCPServerPlugin()) {
		throw new PluginNotFoundException(pluginClassName);
	}
	return info.getFCPServerPlugin();
}
/**
 * look for a Plugin with given classname or filename
 * @param plugname The plugin's main class name or its filename
 * @return true if a matching plugin is currently loaded, false otherwise
 */
public boolean isPluginLoaded(String plugname) {
	for (PluginInfoWrapper pluginInfoWrapper : loadedPlugins.getLoadedPlugins()) {
		if (pluginInfoWrapper.getPluginClassName().equals(plugname) || pluginInfoWrapper.getFilename().equals(plugname)) {
			return true;
		}
	}
	return false;
}
/**
 * @param plugname The plugin filename e.g. "Library" for an official plugin.
 * @return true if the plugin is loaded, currently loading, or queued to
 *         load (i.e. known to the plugin bookkeeping in any state)
 */
public boolean isPluginLoadedOrLoadingOrWantLoad(String plugname) {
	return loadedPlugins.isKnownPlugin(plugname);
}
/**
 * Dispatches an HTTP GET to the named plugin's HTTP handler.
 *
 * The plugin's own classloader is installed as the thread's context
 * classloader for the duration of the call, so the plugin can resolve its
 * bundled resources, and is always restored afterwards.
 *
 * @param plugin The plugin's main class name (the toadlet-list key)
 * @param request The incoming request
 * @return The plugin's response body
 * @throws PluginHTTPException if no HTTP-capable plugin is registered under that name
 */
public String handleHTTPGet(String plugin, HTTPRequest request) throws PluginHTTPException {
	FredPlugin handler = null;
	synchronized(toadletList) {
		handler = toadletList.get(plugin);
	}
	// Also covers handler == null: null is not an instanceof anything.
	if (!(handler instanceof FredPluginHTTP)) {
		throw new NotFoundPluginHTTPException("Plugin not loaded!", "/plugins");
	}
	ClassLoader oldClassLoader = Thread.currentThread().getContextClassLoader();
	ClassLoader pluginClassLoader = handler.getClass().getClassLoader();
	Thread.currentThread().setContextClassLoader(pluginClassLoader);
	try {
		return ((FredPluginHTTP) handler).handleHTTPGet(request);
	} finally {
		// Always restore the previous context classloader.
		Thread.currentThread().setContextClassLoader(oldClassLoader);
	}
}
/**
 * Dispatches an HTTP POST to the named plugin's HTTP handler; the plugin's
 * classloader is installed as context classloader for the call (see
 * {@link #handleHTTPGet}).
 *
 * @param plugin The plugin's main class name (the toadlet-list key)
 * @param request The incoming request
 * @return The plugin's response body
 * @throws PluginHTTPException if the plugin is missing or not HTTP-capable
 */
public String handleHTTPPost(String plugin, HTTPRequest request) throws PluginHTTPException {
	FredPlugin handler = null;
	synchronized(toadletList) {
		handler = toadletList.get(plugin);
	}
	if (handler == null)
		throw new NotFoundPluginHTTPException("Plugin '"+plugin+"' not found!", "/plugins");
	ClassLoader oldClassLoader = Thread.currentThread().getContextClassLoader();
	ClassLoader pluginClassLoader = handler.getClass().getClassLoader();
	Thread.currentThread().setContextClassLoader(pluginClassLoader);
	try {
		if(handler instanceof FredPluginHTTP)
			return ((FredPluginHTTP) handler).handleHTTPPost(request);
	} finally {
		Thread.currentThread().setContextClassLoader(oldClassLoader);
	}
	// Plugin exists but does not implement FredPluginHTTP.
	throw new NotFoundPluginHTTPException("Plugin '"+plugin+"' not found!", "/plugins");
}
/**
 * Stops the first loaded plugin whose thread name matches.
 *
 * @param name The plugin's thread name
 * @param maxWaitTime How long to wait for a clean shutdown
 * @param reloading Whether this stop is part of a reload
 */
public void killPlugin(String name, long maxWaitTime, boolean reloading) {
	for (PluginInfoWrapper wrapper : loadedPlugins.getLoadedPlugins()) {
		if (!wrapper.getThreadName().equals(name)) {
			continue;
		}
		wrapper.stopPlugin(this, maxWaitTime, reloading);
		break;
	}
}

/**
 * Stops the first loaded plugin whose filename matches.
 *
 * @param name The plugin's filename
 * @param maxWaitTime How long to wait for a clean shutdown
 * @param reloading Whether this stop is part of a reload
 */
public void killPluginByFilename(String name, long maxWaitTime, boolean reloading) {
	for (PluginInfoWrapper wrapper : loadedPlugins.getLoadedPlugins()) {
		if (!wrapper.getFilename().equals(name)) {
			continue;
		}
		wrapper.stopPlugin(this, maxWaitTime, reloading);
		break;
	}
}

/**
 * Stops the first loaded plugin whose main class name matches.
 *
 * @param name The plugin's main class name
 * @param maxWaitTime How long to wait for a clean shutdown
 */
public void killPluginByClass(String name, final long maxWaitTime) {
	for (PluginInfoWrapper wrapper : loadedPlugins.getLoadedPlugins()) {
		if (!wrapper.getPluginClassName().equals(name)) {
			continue;
		}
		wrapper.stopPlugin(this, maxWaitTime, false);
		break;
	}
}

/**
 * Stops the given plugin instance, matched by object identity.
 *
 * @param plugin The plugin instance to stop
 * @param maxWaitTime How long to wait for a clean shutdown
 */
public void killPlugin(FredPlugin plugin, long maxWaitTime) {
	for (PluginInfoWrapper wrapper : loadedPlugins.getLoadedPlugins()) {
		if (wrapper.plug != plugin) {
			continue;
		}
		wrapper.stopPlugin(this, maxWaitTime, false);
		break;
	}
}
/**
 * Looks up the official-plugin description for the given plugin name.
 *
 * @param name The official plugin's name
 * @return The description, or null if there is no official plugin by that name
 */
public OfficialPluginDescription getOfficialPlugin(String name) {
	return officialPlugins.get(name);
}

/** Returns the descriptions of all known official plugins. */
public Collection<OfficialPluginDescription> getOfficialPlugins() {
	return officialPlugins.getAll();
}
/**
 * Returns a list of the names of all available official plugins. Right now
 * this list is hardcoded but in future we could retrieve this list from emu
 * or from freenet itself.
 *
 * @return A list of all available plugin names
 */
public List<OfficialPluginDescription> findAvailablePlugins() {
	// Currently simply a mutable copy of the hard-coded registry.
	return new ArrayList<OfficialPluginDescription>(officialPlugins.getAll());
}
/**
 * Checks whether the given name denotes an official plugin.
 *
 * @param name The plugin name to check (may be null or blank)
 * @return The matching official-plugin description, or null if the name is
 *         empty or not an official plugin
 */
public OfficialPluginDescription isOfficialPlugin(String name) {
	if (name == null || name.trim().isEmpty()) {
		return null;
	}
	for (OfficialPluginDescription description : findAvailablePlugins()) {
		if (description.name.equals(name)) {
			return description;
		}
	}
	return null;
}
/** Separate lock for plugin loading. Don't use (this) as we also use that for
 * writing the config file, and because we do a lot inside the lock below; it
 * must not be taken in any other circumstance. */
private final Object pluginLoadSyncObject = new Object();

/** All plugin updates are on a single request client, so they share
 * scheduling/priority state. */
public final RequestClient singleUpdaterRequestClient = new RequestClientBuilder().build();
/**
 * Computes the canonical JAR path for a plugin, creating the plugin
 * directory if necessary.
 *
 * @param pluginName The plugin's base name (without ".jar")
 * @return The File inside the plugin directory, or null if the directory
 *         is unusable (exists as a file, or cannot be created)
 */
public File getPluginFilename(String pluginName) {
	File pluginDirectory = node.getPluginDir();
	boolean directoryUsable;
	if (pluginDirectory.exists()) {
		directoryUsable = pluginDirectory.isDirectory();
	} else {
		directoryUsable = pluginDirectory.mkdirs();
	}
	if (!directoryUsable) {
		return null;
	}
	return new File(pluginDirectory, pluginName + ".jar");
}
/**
 * Tries to load a plugin from the given name. If the name only contains the
 * name of a plugin it is loaded from the plugin directory, if found,
 * otherwise it's loaded from the project server. If the name contains a
 * complete url and the short file already exists in the plugin directory
 * it's loaded from the plugin directory, otherwise it's retrieved from the
 * remote server.
 * @param pdl
 *            The downloader used to fetch the plugin (file/official/url)
 * @param name
 *            The specification of the plugin
 * @param progress
 *            Progress tracker for this load; competing loads of the same
 *            plugin are cancelled in its favour
 * @param alwaysDownload If true, always download a new version anyway.
 * This is especially important on Windows, where we will not usually be
 * able to delete the file after determining that it is too old.
 * @return An instanciated object of the plugin
 * @throws PluginNotFoundException
 *             If anything goes wrong.
 * @throws PluginAlreadyLoaded if the plugin is already loaded
 */
private FredPlugin loadPlugin(PluginDownLoader<?> pdl, String name, PluginProgress progress, boolean alwaysDownload) throws PluginNotFoundException, PluginAlreadyLoaded {

	pdl.setSource(name);

	File pluginDirectory = getPluginDirectory();

	/* get plugin filename. */
	String filename = pdl.getPluginName(name);
	// Reuse a cached copy only when the downloader allows caching and a
	// fresh download was not explicitly requested.
	File pluginFile = getTargetFileForPluginDownload(pluginDirectory, filename, !pdl.isCachingProhibited() && !alwaysDownload);
	boolean downloadWasAttempted = false;

	/* check if file needs to be downloaded. */
	if(logMINOR)
		Logger.minor(this, "plugin file " + pluginFile.getAbsolutePath() + " exists: " + pluginFile.exists()+" downloader "+pdl+" name "+name);
	int RETRIES = 5;
	// Retry loop: each pass may (re-)download the JAR and then attempt to
	// load it. A corrupt JAR is deleted and, if it was a cached copy (no
	// download attempted yet), the next pass downloads a fresh one.
	for (int i = 0; i < RETRIES; i++) {
		if (!pluginFile.exists() || pluginFile.length() == 0) {
			try {
				downloadWasAttempted = true;
				System.err.println("Downloading plugin " + name);
				// Tell the service wrapper we are alive; downloads can be slow.
				WrapperManager.signalStarting((int) MINUTES.toMillis(5));
				try {
					downloadPluginFile(pdl, pluginDirectory, pluginFile, progress);
					verifyDigest(pdl, pluginFile);
				} catch (IOException ioe1) {
					Logger.error(this, "could not load plugin", ioe1);
					throw new PluginNotFoundException("could not load plugin: " + ioe1.getMessage(), ioe1);
				}
			} catch (PluginNotFoundException e) {
				if (i < RETRIES - 1) {
					Logger.normal(this, "Failed to load plugin: " + e, e);
					continue;
				} else {
					throw e;
				}
			}
		}

		// This load wins: cancel any other in-flight loads of the same plugin.
		cancelRunningLoads(name, progress);

		// we do quite a lot inside the lock, use a dedicated one
		synchronized (pluginLoadSyncObject) {
			String pluginMainClassName;
			try {
				pluginMainClassName = verifyJarFileAndGetPluginMainClass(pluginFile);
				FredPlugin object = loadPluginFromJarFile(name, pluginFile, pluginMainClassName, pdl.isOfficialPluginLoader());
				if (object != null) {
					return object;
				}
			} catch (PluginNotFoundException e) {
				Logger.error(this, e.getMessage());
				// Bad JAR: remove it so the next pass can fetch a fresh copy.
				pluginFile.delete();
				if (!downloadWasAttempted) {
					continue;
				}
				throw e;
			}
		}
	}
	return null;
}
/**
 * Returns the plugin cache directory, creating it if necessary.
 *
 * @return The plugin directory
 * @throws PluginNotFoundException if the path exists but is not a
 *             directory, or the directory cannot be created
 */
private File getPluginDirectory() throws PluginNotFoundException {
	File pluginDirectory = node.getPluginDir();
	boolean usable = pluginDirectory.exists()
			? pluginDirectory.isDirectory()
			: pluginDirectory.mkdirs();
	if (!usable) {
		Logger.error(this, "could not create plugin directory");
		throw new PluginNotFoundException("could not create plugin directory");
	}
	return pluginDirectory;
}
/**
 * Picks the file a plugin download should go to (or be read from).
 *
 * Older cached copies are pruned first. If caching is allowed and a cached
 * copy survives, the newest one is reused; otherwise a fresh timestamped
 * filename is returned for the new download.
 *
 * @param pluginDirectory The plugin cache directory
 * @param filename The plugin's base JAR filename
 * @param useCachedFile Whether an existing cached copy may be reused
 * @return The target file (existing cached copy, or a new unique name)
 */
private File getTargetFileForPluginDownload(File pluginDirectory, String filename, boolean useCachedFile) {
	List<File> filesInPluginDirectory = getPreviousInstances(pluginDirectory, filename);
	cleanCacheDirectory(filesInPluginDirectory, useCachedFile);
	if (!filesInPluginDirectory.isEmpty() && useCachedFile) {
		// List is sorted newest-first; index 0 is the copy that was kept.
		return new File(pluginDirectory, filesInPluginDirectory.get(0).getName());
	}
	return new File(pluginDirectory, filename + "-" + System.currentTimeMillis());
}
/**
 * Prunes cached plugin copies. With caching enabled, only the newest copy
 * (index 0 of the newest-first list) is kept; with caching disabled, all
 * copies are removed.
 *
 * @param filesInPluginDirectory Cached copies, sorted newest first
 * @param useCachedFile Whether the newest copy should be preserved
 */
private void cleanCacheDirectory(List<File> filesInPluginDirectory, boolean useCachedFile) {
	if (useCachedFile) {
		if (!filesInPluginDirectory.isEmpty()) {
			deleteCachedVersions(filesInPluginDirectory.subList(1, filesInPluginDirectory.size()));
		}
	} else {
		deleteCachedVersions(filesInPluginDirectory);
	}
}

/**
 * Deletes each of the given cached plugin files; failures are ignored.
 *
 * @param filesInPluginDirectory The files to delete
 */
private void deleteCachedVersions(List<File> filesInPluginDirectory) {
	for (File cachedVersion : filesInPluginDirectory) {
		cachedVersion.delete();
	}
}
/**
 * Downloads the plugin into a temporary file and atomically renames it into
 * place, so a half-written JAR is never visible under the final name.
 *
 * @param pluginDownLoader The downloader providing the plugin's input stream
 * @param pluginDirectory The directory for both the temp file and the target
 * @param pluginFile The final target file
 * @param pluginProgress Progress tracker updated while downloading
 * @throws IOException on a failed read/write (the temp file is cleaned up)
 * @throws PluginNotFoundException if the download is empty or the rename fails
 */
private void downloadPluginFile(PluginDownLoader<?> pluginDownLoader, File pluginDirectory, File pluginFile, PluginProgress pluginProgress) throws IOException, PluginNotFoundException {
	File tempPluginFile = File.createTempFile("plugin-", ".jar", pluginDirectory);
	tempPluginFile.deleteOnExit();

	OutputStream pluginOutputStream = null;
	InputStream pluginInputStream = null;
	try {
		pluginOutputStream = new FileOutputStream(tempPluginFile);
		pluginInputStream = pluginDownLoader.getInputStream(pluginProgress);
		// -1 length: copy until EOF.
		FileUtil.copy(pluginInputStream, pluginOutputStream, -1);
	} catch (IOException ioe1) {
		// Don't leave a partial download behind.
		tempPluginFile.delete();
		throw ioe1;
	} finally {
		Closer.close(pluginInputStream);
		Closer.close(pluginOutputStream);
	}
	if (tempPluginFile.length() == 0) {
		throw new PluginNotFoundException("downloaded zero length file");
	}
	// Atomic-ish move of the complete download into its final name.
	if (!FileUtil.renameTo(tempPluginFile, pluginFile)) {
		Logger.error(this, "could not rename temp file to plugin file");
		throw new PluginNotFoundException("could not rename temp file to plugin file");
	}
}
/**
 * Verifies the downloaded plugin against the downloader's SHA-1 checksum.
 * A downloader that supplies no checksum is trusted as-is.
 *
 * @param pluginDownLoader The downloader that may supply an expected SHA-1
 * @param pluginFile The downloaded plugin file
 * @throws PluginNotFoundException if the checksums do not match
 */
private void verifyDigest(PluginDownLoader<?> pluginDownLoader, File pluginFile) throws PluginNotFoundException {
	String expected = pluginDownLoader.getSHA1sum();
	if (expected == null) {
		// No checksum available; nothing to verify.
		return;
	}
	String actual = getFileDigest(pluginFile, "SHA-1");
	if (expected.equalsIgnoreCase(actual)) {
		return;
	}
	Logger.error(this, "Checksum verification failed, should be " + expected + " but was " + actual);
	throw new PluginNotFoundException("Checksum verification failed, should be " + expected + " but was " + actual);
}
/**
 * Opens the downloaded JAR, validates its manifest, and returns the value
 * of its "Plugin-Main-Class" attribute.
 *
 * @param pluginFile The downloaded plugin JAR
 * @return The fully-qualified name of the plugin's main class
 * @throws PluginNotFoundException if the JAR cannot be read, has no
 *             manifest, or the manifest lacks a Plugin-Main-Class attribute
 * @throws PluginAlreadyLoaded if a plugin with that main class is already loaded
 */
private String verifyJarFileAndGetPluginMainClass(File pluginFile) throws PluginNotFoundException, PluginAlreadyLoaded {
	JarFile pluginJarFile = null;
	try {
		pluginJarFile = new JarFile(pluginFile);
		Manifest manifest = pluginJarFile.getManifest();
		if (manifest == null) {
			throw new PluginNotFoundException("could not load manifest from plugin file");
		}
		Attributes mainAttributes = manifest.getMainAttributes();
		if (mainAttributes == null) {
			throw new PluginNotFoundException("manifest does not contain attributes");
		}
		String pluginMainClassName = mainAttributes.getValue("Plugin-Main-Class");
		if (pluginMainClassName == null) {
			throw new PluginNotFoundException("manifest does not contain a Plugin-Main-Class attribute");
		}
		if (isPluginLoaded(pluginMainClassName)) {
			Logger.error(this, "Plugin already loaded: " + pluginFile.getName());
			throw new PluginAlreadyLoaded();
		}
		return pluginMainClassName;
	} catch (IOException ioe1) {
		// Fixed typo in the error message (was "error procesesing jar file").
		throw new PluginNotFoundException("error processing jar file", ioe1);
	} finally {
		// Close the JAR on every exit path; Closer tolerates null.
		Closer.close(pluginJarFile);
	}
}
/**
 * Instantiates the plugin's main class out of the JAR and performs the
 * immediate post-construction setup (version check for official plugins,
 * language and theme propagation).
 *
 * @param name The plugin name (used for the official version check)
 * @param pluginFile The plugin JAR on disk
 * @param pluginMainClassName The class to instantiate (from the manifest)
 * @param isOfficialPlugin Whether to enforce the official minimum version
 * @return The instantiated plugin
 * @throws PluginNotFoundException wrapping any failure mode of class
 *             loading or instantiation
 */
private FredPlugin loadPluginFromJarFile(String name, File pluginFile, String pluginMainClassName, boolean isOfficialPlugin) throws PluginNotFoundException {
	try {
		JarClassLoader jarClassLoader = new JarClassLoader(pluginFile);
		Class<?> pluginMainClass = jarClassLoader.loadClass(pluginMainClassName);
		// NOTE(review): Class.newInstance() is deprecated in modern Java
		// (it rethrows constructor exceptions unwrapped); the catch-all
		// below masks the difference — confirm before modernising.
		Object object = pluginMainClass.newInstance();
		if (!(object instanceof FredPlugin)) {
			throw new PluginNotFoundException("plugin main class is not a plugin");
		}

		if (isOfficialPlugin) {
			verifyPluginVersion(name, jarClassLoader, (FredPlugin) object);
		}

		// Propagate current language/theme before the plugin is started.
		if (object instanceof FredPluginL10n) {
			((FredPluginL10n) object).setLanguage(NodeL10n.getBase().getSelectedLanguage());
		}
		if (object instanceof FredPluginBaseL10n) {
			((FredPluginBaseL10n) object).setLanguage(NodeL10n.getBase().getSelectedLanguage());
		}
		if (object instanceof FredPluginThemed) {
			((FredPluginThemed) object).setTheme(fproxyTheme);
		}

		return (FredPlugin) object;
	} catch (IOException ioe1) {
		throw new PluginNotFoundException("could not load plugin", ioe1);
	} catch (ClassNotFoundException cnfe1) {
		throw new PluginNotFoundException("could not find plugin class: \"" + cnfe1.getMessage() + "\"", cnfe1);
	} catch (InstantiationException ie1) {
		throw new PluginNotFoundException("could not instantiate plugin", ie1);
	} catch (IllegalAccessException iae1) {
		throw new PluginNotFoundException("could not access plugin main class", iae1);
	} catch (NoClassDefFoundError ncdfe1) {
		throw new PluginNotFoundException("could not find class def, may a missing lib?", ncdfe1);
	} catch (Throwable t) {
		// Last-resort wrapper so callers only ever see PluginNotFoundException.
		throw new PluginNotFoundException("unexpected error while plugin loading " + t, t);
	}
}
/**
 * Enforces the minimum version declared for an official plugin. If the
 * instantiated plugin reports a version below the minimum, its classloader
 * is closed and a {@link PluginTooOldException} is thrown.
 *
 * @param name The official plugin's name
 * @param jarClassLoader The classloader the plugin was loaded from
 * @param plugin The instantiated (but not yet started) plugin
 * @throws PluginTooOldException if the plugin's real version is too old
 */
// NOTE(review): officialPlugins.get(name) is dereferenced without a null
// check; callers appear to only pass names of known official plugins —
// confirm that invariant.
private void verifyPluginVersion(String name, JarClassLoader jarClassLoader, FredPlugin plugin) throws PluginTooOldException {
	System.err.println("Loading official plugin " + name);
	// Check the version after loading it!
	// FIXME IMPORTANT Build the version into the manifest. This is actually pretty easy and just involves changing build.xml.
	// We already do similar things elsewhere.

	// Ugh, this is just as messy ... ideas???? Maybe we need to have OS
	// detection and use grep/sed on unix and find on windows???

	OfficialPluginDescription desc = officialPlugins.get(name);
	long minVer = desc.minimumVersion;
	long ver = -1;

	if (minVer != -1) {
		if (plugin instanceof FredPluginRealVersioned) {
			ver = ((FredPluginRealVersioned) plugin).getRealVersion();
		}
	}

	// FIXME l10n the PluginNotFoundException errors.
	if (ver < minVer) {
		System.err.println("Failed to load plugin " + name + " : TOO OLD: need at least version " + minVer + " but is " + ver);
		Logger.error(this, "Failed to load plugin " + name + " : TOO OLD: need at least version " + minVer + " but is " + ver);

		// At this point, the constructor has run, so it's theoretically possible that the plugin has e.g. created some threads.
		// However, it has not been able to use any of the node's services, because we haven't passed it the PluginRespirator.
		// So there is no need to call runPlugin and terminate().
		// And it doesn't matter all that much if the shutdown fails - we won't be able to delete the file on Windows anyway, we're relying on the ignoreOld logic.
		// Plus, this will not cause a leak of more than one fd per plugin, even when it has started threads.
		try {
			jarClassLoader.close();
		} catch (Throwable t) {
			Logger.error(this, "Failed to close jar classloader for plugin: " + t, t);
		}
		throw new PluginTooOldException("plugin too old: need at least version " + minVer + " but is " + ver);
	}
}
/**
 * Returns all cached JAR instances whose names start with the given
 * filename, sorted by the "-&lt;timestamp&gt;" suffix, largest (i.e. newest)
 * first. Files without a parseable ".jar-&lt;timestamp&gt;" suffix sort as
 * timestamp 0 (oldest).
 *
 * @param pluginDirectory
 *            The plugin cache directory
 * @param filename
 *            The name of the JAR file
 * @return All cached instances, newest first; empty if the directory
 *         cannot be listed
 */
private List<File> getPreviousInstances(File pluginDirectory, final String filename) {
	File[] matchingFiles = pluginDirectory.listFiles(new FileFilter() {
		@Override
		public boolean accept(File pathname) {
			return pathname.isFile() && pathname.getName().startsWith(filename);
		}
	});
	// listFiles() returns null if the directory does not exist or cannot
	// be read; the previous code would have thrown an NPE in Arrays.asList().
	if (matchingFiles == null) {
		return new ArrayList<File>();
	}
	List<File> cachedFiles = new ArrayList<File>(Arrays.asList(matchingFiles));
	Collections.sort(cachedFiles, new Comparator<File>() {
		@Override
		public int compare(File file1, File file2) {
			// Direct comparison instead of the previous subtract-and-clamp,
			// which could overflow for widely separated timestamps.
			long timestamp1 = extractTimestamp(file1.getName());
			long timestamp2 = extractTimestamp(file2.getName());
			if (timestamp1 == timestamp2) {
				return 0;
			}
			// Newest (largest timestamp) first.
			return (timestamp2 > timestamp1) ? 1 : -1;
		}

		private long extractTimestamp(String filename) {
			int lastIndexOfDash = filename.lastIndexOf(".jar-");
			if (lastIndexOfDash == -1) {
				return 0;
			}
			try {
				return Long.parseLong(filename.substring(lastIndexOfDash + 5));
			} catch (NumberFormatException nfe1) {
				return 0;
			}
		}
	});
	return cachedFiles;
}
/**
 * Computes the hex-encoded digest of a file with the named algorithm.
 * SHA-256 digests are borrowed from (and now reliably returned to) the
 * shared SHA256 pool.
 *
 * @param file The file to hash
 * @param digest The digest algorithm name, e.g. "SHA-1" or "SHA-256"
 * @return The hex-encoded digest of the file's contents
 * @throws PluginNotFoundException wrapping any I/O or algorithm failure
 */
private String getFileDigest(File file, String digest) throws PluginNotFoundException {
	final int BUFFERSIZE = 4096;
	MessageDigest hash = null;
	FileInputStream fis = null;
	BufferedInputStream bis = null;
	boolean wasFromDigest256Pool = false;
	String result;

	try {
		if ("SHA-256".equals(digest)) {
			hash = SHA256.getMessageDigest(); // grab digest from pool
			wasFromDigest256Pool = true;
		} else {
			hash = MessageDigest.getInstance(digest);
		}
		// We compute the hash
		// http://java.sun.com/developer/TechTips/1998/tt0915.html#tip2
		fis = new FileInputStream(file);
		bis = new BufferedInputStream(fis);
		int len = 0;
		byte[] buffer = new byte[BUFFERSIZE];
		while((len = bis.read(buffer)) > -1) {
			hash.update(buffer, 0, len);
		}
		result = HexUtil.bytesToHex(hash.digest());
	} catch(Exception e) {
		throw new PluginNotFoundException("Error while computing hash '"+digest+"' of the downloaded plugin: " + e, e);
	} finally {
		// Return the pooled digest on every path; previously it leaked
		// from the pool whenever reading or hashing threw.
		// (wasFromDigest256Pool implies hash != null.)
		if (wasFromDigest256Pool)
			SHA256.returnMessageDigest(hash);
		Closer.close(bis);
		Closer.close(fis);
	}
	return result;
}
/** Exposes the node's ticker to plugin infrastructure (package-private). */
Ticker getTicker() {
	return node.getTicker();
}
/**
 * Tracks the progress of loading and starting a plugin.
 *
 * @author David ‘Bombe’ Roden &lt;[email protected]&gt;
 * @version $Id$
 */
public static class PluginProgress {

	/** Coarse phase of the load: fetching the JAR, then starting the plugin. */
	enum ProgressState {
		DOWNLOADING,
		STARTING
	}

	/** The starting time. */
	private long startingTime = System.currentTimeMillis();

	/** The current state. */
	private ProgressState pluginProgress;

	/** The name by which the plugin is loaded. */
	private String name;

	/** Total. Might be bytes, might be blocks. */
	private int total;

	/** Minimum for success */
	private int minSuccessful;

	/** Current value. Same units as total. */
	private int current;

	// Whether the total is known to be final (affects progress rendering).
	private boolean finalisedTotal;

	// Failed units (same units as total).
	private int failed;

	// Fatally failed units (same units as total).
	private int fatallyFailed;

	// Downloader for this load; kept so the fetch can be cancelled.
	private final PluginDownLoader<?> loader;

	/**
	 * Creates a new progress tracker for a plugin that is loaded by the
	 * given name.
	 *
	 * @param name
	 *            The name by which the plugin is loaded
	 * @param pdl
	 *            The downloader performing the load
	 */
	PluginProgress(String name, PluginDownLoader<?> pdl) {
		this.name = name;
		pluginProgress = ProgressState.DOWNLOADING;
		loader = pdl;
	}

	/** Asks the underlying downloader to cancel the fetch, if it can. */
	public void kill() {
		loader.tryCancel();
	}

	/**
	 * Returns the number of milliseconds this plugin is already being
	 * loaded.
	 *
	 * @return The time this plugin is already being loaded (in
	 *         milliseconds)
	 */
	public long getTime() {
		return System.currentTimeMillis() - startingTime;
	}

	/**
	 * Returns the name by which the plugin is loaded.
	 *
	 * @return The name by which the plugin is loaded
	 */
	public String getName() {
		return name;
	}

	/**
	 * Returns the current state of the plugin start procedure.
	 *
	 * @return The current state of the plugin
	 */
	public ProgressState getProgress() {
		return pluginProgress;
	}

	/**
	 * Sets the current state of the plugin start procedure
	 *
	 * @param state
	 *            The current state
	 */
	void setProgress(ProgressState state) {
		this.pluginProgress = state;
	}

	/**
	 * Returns a textual representation of this progress tracker, giving
	 * the plugin name, the start time, and the current state.
	 *
	 * @return A debug description of this progress tracker
	 */
	@Override
	public String toString() {
		return "PluginProgress[name=" + name + ",startingTime=" + startingTime + ",progress=" + pluginProgress + "]";
	}

	/**
	 * Renders this progress as a localised table cell: a block-progress
	 * cell while downloading with a known total, otherwise a simple
	 * "downloading"/"starting" label, falling back to {@link #toString()}.
	 */
	public HTMLNode toLocalisedHTML() {
		if(pluginProgress == ProgressState.DOWNLOADING && total > 0) {
			return QueueToadlet.createProgressCell(false, true, ClientPut.COMPRESS_STATE.WORKING, current, failed, fatallyFailed, minSuccessful, total, finalisedTotal, false);
		} else if(pluginProgress == ProgressState.DOWNLOADING)
			return new HTMLNode("td", NodeL10n.getBase().getString("PproxyToadlet.startingPluginStatus.downloading"));
		else if(pluginProgress == ProgressState.STARTING)
			return new HTMLNode("td", NodeL10n.getBase().getString("PproxyToadlet.startingPluginStatus.starting"));
		else
			return new HTMLNode("td", toString());
	}

	/**
	 * Updates the download counters (all in the same units, bytes or
	 * blocks) and forces the state back to DOWNLOADING.
	 */
	public void setDownloadProgress(int minSuccess, int current, int total, int failed, int fatallyFailed, boolean finalised) {
		this.pluginProgress = ProgressState.DOWNLOADING;
		this.total = total;
		this.current = current;
		this.minSuccessful = minSuccess;
		this.failed = failed;
		this.fatallyFailed = fatallyFailed;
		this.finalisedTotal = finalised;
	}

	/** Forces the state back to DOWNLOADING without touching the counters. */
	public void setDownloading() {
		this.pluginProgress = ProgressState.DOWNLOADING;
	}

	/** Whether this load came through the official-plugin loader. */
	public boolean isOfficialPlugin() {
		return loader.isOfficialPluginLoader();
	}

	/**
	 * Returns the localised display name for official plugins, or the raw
	 * load name for unofficial ones.
	 */
	public String getLocalisedPluginName() {
		String pluginName = getName();
		if(isOfficialPlugin()) {
			return getOfficialPluginLocalisedName(pluginName);
		} else return pluginName;
	}
}
/** Looks up the localised display name of an official plugin. */
static String getOfficialPluginLocalisedName(String pluginName) {
	return l10n("pluginName."+pluginName);
}
/**
 * Records the new FProxy theme and pushes it to every loaded plugin's page
 * maker, notifying theme-aware plugins asynchronously so a misbehaving
 * plugin cannot block the caller.
 *
 * @param cssName The new theme
 */
public void setFProxyTheme(final THEME cssName) {
	//if (fproxyTheme.equals(cssName)) return;
	fproxyTheme = cssName;
	for (PluginInfoWrapper pluginInfoWrapper : loadedPlugins.getLoadedPlugins()) {
		pluginInfoWrapper.pr.getPageMaker().setTheme(cssName);
		if (pluginInfoWrapper.isThemedPlugin()) {
			final FredPluginThemed plug = (FredPluginThemed) pluginInfoWrapper.plug;
			executor.execute(new Runnable() {
				@Override
				public void run() {
					try {
						plug.setTheme(cssName);
					} catch (Throwable t) {
						// Fixed log-message typo (was "Cought Trowable in Callback").
						Logger.error(this, "Caught Throwable in Callback", t);
					}
				}
			}, "Callback");
		}
	}
}
/**
 * Static entry point for language changes; forwards to the singleton
 * instance if one exists (no-op before the manager is constructed).
 */
public static void setLanguage(LANGUAGE lang) {
	if (selfinstance == null) return;
	selfinstance.setPluginLanguage(lang);
}
/**
 * Pushes the new language to every l10n-capable plugin. Each plugin is
 * notified asynchronously on the executor so a misbehaving plugin cannot
 * block the caller; the two l10n interfaces are unrelated, hence the two
 * branches.
 *
 * @param lang The new language
 */
private void setPluginLanguage(final LANGUAGE lang) {
	for (PluginInfoWrapper pluginInfoWrapper : loadedPlugins.getLoadedPlugins()) {
		if (pluginInfoWrapper.isL10nPlugin()) {
			final FredPluginL10n plug = (FredPluginL10n) (pluginInfoWrapper.plug);
			executor.execute(new Runnable() {
				@Override
				public void run() {
					try {
						plug.setLanguage(lang);
					} catch (Throwable t) {
						// Fixed log-message typo (was "Cought Trowable in Callback").
						Logger.error(this, "Caught Throwable in Callback", t);
					}
				}
			}, "Callback");
		} else if (pluginInfoWrapper.isBaseL10nPlugin()) {
			final FredPluginBaseL10n plug = (FredPluginBaseL10n) (pluginInfoWrapper.plug);
			executor.execute(new Runnable() {
				@Override
				public void run() {
					try {
						plug.setLanguage(lang);
					} catch (Throwable t) {
						// Fixed log-message typo (was "Cought Trowable in Callback").
						Logger.error(this, "Caught Throwable in Callback", t);
					}
				}
			}, "Callback");
		}
	}
}
/**
 * Returns the currently active FProxy theme.
 *
 * @deprecated will be removed in version 1473.
 */
@Deprecated
public THEME getFProxyTheme() {
	return fproxyTheme;
}

/** Whether official plugins are always fetched from the central server. */
public boolean loadOfficialPluginsFromWeb() {
	return alwaysLoadOfficialPluginsFromCentralServer;
}
/**
 * Undoes everything {@code register()} did for a plugin: HTTP handler,
 * config toadlet, IP-detector/port-forward/bandwidth hooks, and — unless
 * this is part of a reload — its plugin updater.
 *
 * @param wrapper The wrapper of the plugin being unregistered
 * @param plug The plugin instance itself
 * @param reloading True if the plugin will be re-registered immediately
 */
public void unregisterPlugin(PluginInfoWrapper wrapper, FredPlugin plug, boolean reloading) {
	unregisterPluginToadlet(wrapper);
	if(wrapper.isConfigurablePlugin()) {
		core.getToadletContainer().unregister(wrapper.getConfigToadlet());
	}
	if(wrapper.isIPDetectorPlugin())
		node.ipDetector.unregisterIPDetectorPlugin((FredPluginIPDetector)plug);
	if(wrapper.isPortForwardPlugin())
		node.ipDetector.unregisterPortForwardPlugin((FredPluginPortForward)plug);
	if(wrapper.isBandwidthIndicator())
		node.ipDetector.unregisterBandwidthIndicatorPlugin((FredPluginBandwidthIndicator)plug);
	if(!reloading)
		// Keep the updater alive across reloads, but stop it on a real unload.
		node.nodeUpdater.stopPluginUpdater(wrapper.getFilename());
}
/** Returns the {@code enabled} flag for this plugin manager. */
public boolean isEnabled() {
    return this.enabled;
}
/**
 * Thread-safe bookkeeping for plugin lifecycle state: plugins currently
 * starting, plugins that finished loading, and plugins whose load failed
 * (keyed by plugin name/filename, mapped to the user alert shown for the
 * failure).
 *
 * All state is guarded by the instance monitor (synchronized methods).
 *
 * Fix: the collection getters previously returned the live internal
 * collections from inside a synchronized block, which gave callers no
 * protection at all while iterating (the lock was already released) and
 * risked ConcurrentModificationException under concurrent registration or
 * removal. They now return defensive snapshots, which is backward-compatible
 * for the read-only iteration the callers perform.
 */
private static class LoadedPlugins {

    private final Set<PluginProgress> startingPlugins = new HashSet<PluginProgress>();
    private final Set<PluginInfoWrapper> loadedPlugins = new HashSet<PluginInfoWrapper>();
    private final Map<String, PluginLoadFailedUserAlert> failedPluginAlerts = new HashMap<String, PluginLoadFailedUserAlert>();

    public synchronized void addStartingPlugin(PluginProgress pluginProgress) {
        startingPlugins.add(pluginProgress);
    }

    /** Returns a snapshot of the plugins that are currently being started. */
    public synchronized Collection<PluginProgress> getStartingPlugins() {
        return new HashSet<PluginProgress>(startingPlugins);
    }

    public synchronized void removeStartingPlugin(PluginProgress pluginProgress) {
        startingPlugins.remove(pluginProgress);
    }

    /** Returns a snapshot of the plugins that have loaded successfully. */
    public synchronized Collection<PluginInfoWrapper> getLoadedPlugins() {
        return new HashSet<PluginInfoWrapper>(loadedPlugins);
    }

    public synchronized void removeLoadedPlugin(PluginInfoWrapper pluginInfoWrapper) {
        loadedPlugins.remove(pluginInfoWrapper);
    }

    public synchronized boolean hasLoadedPlugin(PluginInfoWrapper pluginInfoWrapper) {
        return loadedPlugins.contains(pluginInfoWrapper);
    }

    public synchronized boolean hasLoadedPlugins() {
        return !loadedPlugins.isEmpty();
    }

    /** Returns a snapshot of the names of the plugins that failed to load. */
    public synchronized Collection<String> getFailedPluginNames() {
        return new HashSet<String>(failedPluginAlerts.keySet());
    }

    public synchronized void addLoadedPlugin(PluginInfoWrapper pluginInfoWrapper) {
        loadedPlugins.add(pluginInfoWrapper);
    }

    /**
     * Records the load-failure alert for the given plugin.
     *
     * @return the previously registered alert for that name, or null
     */
    public synchronized PluginLoadFailedUserAlert replaceUserAlert(String pluginName, PluginLoadFailedUserAlert pluginLoadFailedUserAlert) {
        return failedPluginAlerts.put(pluginName, pluginLoadFailedUserAlert);
    }

    public synchronized boolean isFailedPlugin(String filename) {
        return failedPluginAlerts.containsKey(filename);
    }

    public synchronized void removeFailedPlugin(String pluginName) {
        failedPluginAlerts.remove(pluginName);
    }

    /**
     * True if the given name matches a failed plugin, a plugin currently
     * starting (by progress name), or a loaded plugin (by filename).
     */
    public synchronized boolean isKnownPlugin(String pluginName) {
        if (failedPluginAlerts.containsKey(pluginName)) {
            return true;
        }
        for (PluginProgress pluginProgress : startingPlugins) {
            if (pluginProgress.getName().equals(pluginName)) {
                return true;
            }
        }
        for (PluginInfoWrapper pluginInfoWrapper : loadedPlugins) {
            if (pluginInfoWrapper.getFilename().equals(pluginName)) {
                return true;
            }
        }
        return false;
    }
}
}
// ==== file boundary: end of src/freenet/pluginmanager/PluginManager.java (hyphanet/fred) ====
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util.concurrent;
import org.apache.logging.log4j.Logger;
import io.crate.common.collections.Tuple;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.function.Consumer;
/**
 * This async IO processor allows to batch IO operations and have a single writer processing the write operations.
 * This can be used to ensure that threads can continue with other work while the actual IO operation is still processed
 * by a single worker. A worker in this context can be any caller of the {@link #put(Object, Consumer)} method since it will
 * hijack a worker if nobody else is currently processing queued items. If the internal queue has reached its capacity incoming threads
 * might be blocked until other items are processed
 */
public abstract class AsyncIOProcessor<Item> {

    private final Logger logger;
    // Pending (item, completion-listener) pairs waiting to be written.
    private final ArrayBlockingQueue<Tuple<Item, Consumer<Exception>>> queue;
    // Single permit: the thread holding it is the one "promised" writer that
    // drains the queue and performs the actual write.
    private final Semaphore promiseSemaphore = new Semaphore(1);

    /**
     * @param logger    logger used to report write and listener-notification failures
     * @param queueSize capacity of the internal queue; {@link #put} blocks when it is full
     */
    protected AsyncIOProcessor(Logger logger, int queueSize) {
        this.logger = logger;
        this.queue = new ArrayBlockingQueue<>(queueSize);
    }

    /**
     * Adds the given item to the queue. The listener is notified once the item is processed,
     * receiving the write exception on failure or {@code null} on success.
     */
    public final void put(Item item, Consumer<Exception> listener) {
        Objects.requireNonNull(item, "item must not be null");
        Objects.requireNonNull(listener, "listener must not be null");
        // the algorithm here tries to reduce the load on each individual caller.
        // we try to have only one caller that processes pending items to disk while others just add to the queue but
        // at the same time never overload the node by pushing too many items into the queue.

        // we first try make a promise that we are responsible for the processing
        final boolean promised = promiseSemaphore.tryAcquire();
        if (promised == false) {
            // in this case we are not responsible and can just block until there is space
            try {
                queue.put(new Tuple<>(item, listener));
            } catch (InterruptedException e) {
                // restore the interrupt flag and report the failure to this item's listener
                Thread.currentThread().interrupt();
                listener.accept(e);
            }
        }

        // here we have to try to make the promise again otherwise there is a race when a thread puts an entry without making the promise
        // while we are draining that mean we might exit below too early in the while loop if the drainAndSync call is fast.
        if (promised || promiseSemaphore.tryAcquire()) {
            final List<Tuple<Item, Consumer<Exception>>> candidates = new ArrayList<>();
            if (promised) {
                // we are responsible for processing we don't need to add the tuple to the queue we can just add it to the candidates
                // no need to preserve context for listener since it runs in current thread.
                candidates.add(new Tuple<>(item, listener));
            }
            // since we made the promise to process we gotta do it here at least once
            drainAndProcessAndRelease(candidates);
            while (queue.isEmpty() == false && promiseSemaphore.tryAcquire()) {
                // yet if the queue is not empty AND nobody else has yet made the promise to take over we continue processing
                drainAndProcessAndRelease(candidates);
            }
        }
    }

    // Drains the queue into candidates, writes them, then releases the promise
    // BEFORE notifying listeners so another writer can take over immediately.
    private void drainAndProcessAndRelease(List<Tuple<Item, Consumer<Exception>>> candidates) {
        Exception exception;
        try {
            queue.drainTo(candidates);
            exception = processList(candidates);
        } finally {
            promiseSemaphore.release();
        }
        notifyList(candidates, exception);
        candidates.clear();
    }

    // Performs the batched write; returns the exception to hand to every
    // listener of this batch, or null when the write succeeded (or the batch
    // was empty).
    private Exception processList(List<Tuple<Item, Consumer<Exception>>> candidates) {
        Exception exception = null;
        if (candidates.isEmpty() == false) {
            try {
                write(candidates);
            } catch (Exception ex) { // if this fails we are in deep shit - fail the request
                logger.debug("failed to write candidates", ex);
                // this exception is passed to all listeners - we don't retry. if this doesn't work we are in deep shit
                exception = ex;
            }
        }
        return exception;
    }

    // Invokes each listener with the (possibly null) write exception. A
    // listener that throws is logged and does not prevent the remaining
    // listeners from being notified.
    private void notifyList(List<Tuple<Item, Consumer<Exception>>> candidates, Exception exception) {
        for (Tuple<Item, Consumer<Exception>> tuple : candidates) {
            Consumer<Exception> consumer = tuple.v2();
            try {
                consumer.accept(exception);
            } catch (Exception ex) {
                logger.warn("failed to notify callback", ex);
            }
        }
    }

    /**
     * Writes or processes the items out or to disk.
     */
    protected abstract void write(List<Tuple<Item, Consumer<Exception>>> candidates) throws IOException;
}
// ==== file boundary: end of server/src/main/java/org/elasticsearch/common/util/concurrent/AsyncIOProcessor.java (crate/crate) ====
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
import java.io.Closeable;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UncheckedIOException;
import java.nio.channels.ClosedByInterruptException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.LongSupplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.SetOnce;
import org.apache.lucene.util.ThreadInterruptedException;
import org.elasticsearch.Assertions;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.support.replication.PendingReplicationActions;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.CheckedRunnable;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.AsyncIOProcessor;
import org.elasticsearch.common.util.concurrent.RunOnce;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.WriteStateException;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.engine.NoOpEngine;
import org.elasticsearch.index.engine.ReadOnlyEngine;
import org.elasticsearch.index.engine.RefreshFailedEngineException;
import org.elasticsearch.index.engine.SafeCommitInfo;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.RootObjectMapper;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.seqno.ReplicationTracker;
import org.elasticsearch.index.seqno.RetentionLease;
import org.elasticsearch.index.seqno.RetentionLeaseStats;
import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
import org.elasticsearch.index.seqno.RetentionLeases;
import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.PrimaryReplicaSyncer.ResyncTask;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.Store.MetadataSnapshot;
import org.elasticsearch.index.store.StoreFileMetadata;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.RecoveryTarget;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.jetbrains.annotations.Nullable;
import com.carrotsearch.hppc.ObjectLongMap;
import io.crate.common.Booleans;
import io.crate.common.collections.Tuple;
import io.crate.common.exceptions.Exceptions;
import io.crate.common.io.IOUtils;
import io.crate.common.unit.TimeValue;
import io.crate.exceptions.SQLExceptions;
public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
public static final long RETAIN_ALL = -1;
private final ThreadPool threadPool;
private final MapperService mapperService;
private final QueryCache queryCache;
private final Store store;
private final Object mutex = new Object();
private final String checkIndexOnStartup;
private final CodecService codecService;
private final TranslogConfig translogConfig;
private final IndexEventListener indexEventListener;
private final QueryCachingPolicy cachingPolicy;
// Package visible for testing
final CircuitBreakerService circuitBreakerService;
private final GlobalCheckpointListeners globalCheckpointListeners;
private final PendingReplicationActions pendingReplicationActions;
private final ReplicationTracker replicationTracker;
protected volatile ShardRouting shardRouting;
protected volatile IndexShardState state;
// ensure happens-before relation between addRefreshListener() and postRecovery()
private final Object postRecoveryMutex = new Object();
protected volatile long pendingPrimaryTerm; // see JavaDocs for getPendingPrimaryTerm
private final Object engineMutex = new Object(); // lock ordering: engineMutex -> mutex
private final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
private volatile EngineFactory engineFactory;
final Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders;
private final IndexingOperationListener indexingOperationListeners;
private final Runnable globalCheckpointSyncer;
private final RetentionLeaseSyncer retentionLeaseSyncer;
/** Exposes the global-checkpoint syncer callback supplied at construction time. */
Runnable getGlobalCheckpointSyncer() {
    return this.globalCheckpointSyncer;
}
/** Exposes the retention-lease syncer supplied at construction time. */
public RetentionLeaseSyncer getRetentionLeaseSyncer() {
    return this.retentionLeaseSyncer;
}
@Nullable
private volatile RecoveryState recoveryState;
private final RecoveryStats recoveryStats = new RecoveryStats();
private final MeanMetric refreshMetric = new MeanMetric();
private final MeanMetric flushMetric = new MeanMetric();
private final CounterMetric periodicFlushMetric = new CounterMetric();
private final ShardEventListener shardEventListener = new ShardEventListener();
private final ShardPath path;
private final IndexShardOperationPermits indexShardOperationPermits;
private static final EnumSet<IndexShardState> READ_ALLOWED_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.POST_RECOVERY);
// for primaries, we only allow to write when actually started (so the cluster has decided we started)
// in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be
// in state RECOVERING or POST_RECOVERY.
// for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent
// a relocated shard can also be target of a replication if the relocation target has not been marked as active yet and is syncing it's changes back to the relocation source
private static final EnumSet<IndexShardState> WRITE_ALLOWED_STATES = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
/**
* True if this shard is still indexing (recently) and false if we've been idle for long enough (as periodically checked by {@link
* IndexingMemoryController}).
*/
private final AtomicBoolean active = new AtomicBoolean();
/**
* Allows for the registration of listeners that are called when a change becomes visible for search.
*/
private final RefreshListeners refreshListeners;
private final AtomicLong lastSearcherAccess = new AtomicLong();
private final AtomicReference<Translog.Location> pendingRefreshLocation = new AtomicReference<>();
private final RefreshPendingLocationListener refreshPendingLocationListener;
private volatile boolean useRetentionLeasesInPeerRecovery;
/**
 * Creates the shard in state {@link IndexShardState#CREATED} and wires up its
 * shard-local services: translog config, replication tracker, global
 * checkpoint listeners, pending replication actions, query caching policy,
 * operation permits and refresh listeners. The initial shard metadata is
 * persisted before the constructor returns. No engine is created here.
 *
 * @throws IOException if persisting the shard metadata fails
 */
public IndexShard(
        ShardRouting shardRouting,
        IndexSettings indexSettings,
        ShardPath path,
        Store store,
        QueryCache queryCache,
        MapperService mapperService,
        Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders,
        IndexEventListener indexEventListener,
        ThreadPool threadPool,
        BigArrays bigArrays,
        List<IndexingOperationListener> listeners,
        Runnable globalCheckpointSyncer,
        RetentionLeaseSyncer retentionLeaseSyncer,
        CircuitBreakerService circuitBreakerService) throws IOException {
    super(shardRouting.shardId(), indexSettings);
    assert shardRouting.initializing();
    this.shardRouting = shardRouting;
    final Settings settings = indexSettings.getSettings();
    this.codecService = new CodecService();
    Objects.requireNonNull(store, "Store must be provided to the index shard");
    this.engineFactoryProviders = engineFactoryProviders;
    this.engineFactory = getEngineFactory();
    this.store = store;
    this.indexEventListener = indexEventListener;
    this.threadPool = threadPool;
    this.mapperService = mapperService;
    this.queryCache = queryCache;
    this.indexingOperationListeners = new IndexingOperationListener.CompositeListener(listeners, logger);
    this.globalCheckpointSyncer = globalCheckpointSyncer;
    this.retentionLeaseSyncer = retentionLeaseSyncer;
    state = IndexShardState.CREATED;
    this.path = path;
    this.circuitBreakerService = circuitBreakerService;
    /* create engine config */
    logger.debug("state: [CREATED]");
    this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP);
    this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays);
    final String aId = shardRouting.allocationId().getId();
    final long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardId.id());
    // The pending primary term starts as the term recorded in the index
    // metadata; see the getPendingPrimaryTerm() JavaDocs for its semantics.
    this.pendingPrimaryTerm = primaryTerm;
    this.globalCheckpointListeners = new GlobalCheckpointListeners(
        shardId,
        threadPool.scheduler(),
        logger
    );
    this.pendingReplicationActions = new PendingReplicationActions(shardId, threadPool);
    // The tracker starts with an unassigned global checkpoint; retention
    // leases are synced through the supplied RetentionLeaseSyncer.
    this.replicationTracker = new ReplicationTracker(
        shardId,
        aId,
        indexSettings,
        primaryTerm,
        UNASSIGNED_SEQ_NO,
        globalCheckpointListeners::globalCheckpointUpdated,
        threadPool::absoluteTimeInMillis,
        (retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, aId, getPendingPrimaryTerm(), retentionLeases, listener),
        this::getSafeCommitInfo,
        pendingReplicationActions);
    // the query cache is a node-level thing, however we want the most popular filters
    // to be computed on a per-shard basis
    if (IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.get(settings)) {
        // "cache everything" overrides the usage-tracking heuristic below
        cachingPolicy = new QueryCachingPolicy() {

            @Override
            public void onUse(Query query) {

            }

            @Override
            public boolean shouldCache(Query query) {
                return true;
            }
        };
    } else {
        cachingPolicy = new UsageTrackingQueryCachingPolicy();
    }
    indexShardOperationPermits = new IndexShardOperationPermits(shardId, threadPool);
    refreshListeners = buildRefreshListeners();
    lastSearcherAccess.set(threadPool.relativeTimeInMillis());
    // Persist the initial shard metadata (no previous routing yet, hence null).
    persistMetadata(path, indexSettings, shardRouting, null, logger);
    this.useRetentionLeasesInPeerRecovery = replicationTracker.hasAllPeerRecoveryRetentionLeases();
    this.refreshPendingLocationListener = new RefreshPendingLocationListener();
}
/** Returns the thread pool this shard was constructed with. */
public ThreadPool getThreadPool() {
    return threadPool;
}
/** Returns the {@link Store} backing this shard. */
public Store store() {
    return store;
}
/** Returns the {@link MapperService} this shard was constructed with. */
public MapperService mapperService() {
    return this.mapperService;
}
/**
 * USE THIS METHOD WITH CARE!
 * Returns the primary term this shard is supposed to be on. During a primary
 * promotion, or when a replica learns about a new primary, the term exposed
 * here may not yet be the term the shard internally assigns to operations;
 * the shard auto-corrects its internal operation term, but that can take
 * time. See {@link IndexMetadata#primaryTerm(int)}
 */
public long getPendingPrimaryTerm() {
    return pendingPrimaryTerm;
}
/** Returns the primary term that is currently being used to assign to operations. */
public long getOperationPrimaryTerm() {
    return this.replicationTracker.getOperationPrimaryTerm();
}
/**
 * Returns the latest cluster routing entry received with this shard.
 */
@Override
public ShardRouting routingEntry() {
    return shardRouting;
}
/**
 * Applies an updated routing entry (and, for primaries, a possibly increased
 * primary term) from the cluster state. All internal state transitions happen
 * under {@code mutex}; index-event-listener notifications happen after the
 * lock is released. An increased term means this shard is being promoted to
 * primary (see the assertions below): primary mode is then activated
 * asynchronously via {@code bumpPrimaryTerm} and a primary-replica resync is
 * started.
 *
 * @throws IllegalArgumentException     if the new routing does not belong to
 *                                      this shard/allocation or demotes a
 *                                      primary to replica
 * @throws IndexShardRelocatedException if the shard has already handed off
 *                                      primary mode during relocation
 */
@Override
public void updateShardState(final ShardRouting newRouting,
                             final long newPrimaryTerm,
                             final BiConsumer<IndexShard, ActionListener<ResyncTask>> primaryReplicaSyncer,
                             final long applyingClusterStateVersion,
                             final Set<String> inSyncAllocationIds,
                             final IndexShardRoutingTable routingTable) throws IOException {
    final ShardRouting currentRouting;
    synchronized (mutex) {
        currentRouting = this.shardRouting;
        assert currentRouting != null : "shardRouting must not be null";

        // Sanity checks: the new routing must be for this shard and this
        // allocation, and may never demote a primary back to replica.
        if (!newRouting.shardId().equals(shardId())) {
            throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId());
        }
        if (newRouting.isSameAllocation(currentRouting) == false) {
            throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
        }
        if (currentRouting.primary() && newRouting.primary() == false) {
            throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. Current "
                + currentRouting + ", new " + newRouting);
        }

        if (newRouting.primary()) {
            replicationTracker.updateFromMaster(applyingClusterStateVersion, inSyncAllocationIds, routingTable);
        }

        if (state == IndexShardState.POST_RECOVERY && newRouting.active()) {
            assert currentRouting.active() == false : "we are in POST_RECOVERY, but our shard routing is active " + currentRouting;
            assert currentRouting.isRelocationTarget() == false || currentRouting.primary() == false ||
                replicationTracker.isPrimaryMode() :
                "a primary relocation is completed by the master, but primary mode is not active " + currentRouting;

            changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
        } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isRelocated() &&
            (newRouting.relocating() == false || newRouting.equalsIgnoringMetadata(currentRouting) == false)) {
            // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery
            // failure / cancellation). The reason is that at the moment we cannot safely reactivate primary mode without risking two
            // active primaries.
            throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
        }
        assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED :
            "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state;

        persistMetadata(path, indexSettings, newRouting, currentRouting, logger);

        // Latch released once this.shardRouting has been swapped; the term-bump
        // callback below awaits it so it observes the updated routing.
        final CountDownLatch shardStateUpdated = new CountDownLatch(1);

        if (newRouting.primary()) {
            if (newPrimaryTerm == pendingPrimaryTerm) {
                if (currentRouting.initializing() && newRouting.active()) {
                    if (currentRouting.isRelocationTarget() == false) {
                        // the master started a recovering primary, activate primary mode.
                        replicationTracker.activatePrimaryMode(getLocalCheckpoint());
                        ensurePeerRecoveryRetentionLeasesExist();
                    }
                }
            } else {
                assert currentRouting.primary() == false : "term is only increased as part of primary promotion";
                /* Note that due to cluster state batching an initializing primary shard term can failed and re-assigned
                 * in one state causing it's term to be incremented. Note that if both current shard state and new
                 * shard state are initializing, we could replace the current shard and reinitialize it. It is however
                 * possible that this shard is being started. This can happen if:
                 * 1) Shard is post recovery and sends shard started to the master
                 * 2) Node gets disconnected and rejoins
                 * 3) Master assigns the shard back to the node
                 * 4) Master processes the shard started and starts the shard
                 * 5) The node process the cluster state where the shard is both started and primary term is incremented.
                 *
                 * We could fail the shard in that case, but this will cause it to be removed from the insync allocations list
                 * potentially preventing re-allocation.
                 */
                assert newRouting.initializing() == false :
                    "a started primary shard should never update its term; "
                        + "shard " + newRouting + ", "
                        + "current term [" + pendingPrimaryTerm + "], "
                        + "new term [" + newPrimaryTerm + "]";
                assert newPrimaryTerm > pendingPrimaryTerm :
                    "primary terms can only go up; current term [" + pendingPrimaryTerm + "], new term [" + newPrimaryTerm + "]";
                /*
                 * Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we
                 * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary term is
                 * incremented.
                 */
                // to prevent primary relocation handoff while resync is not completed
                boolean resyncStarted = primaryReplicaResyncInProgress.compareAndSet(false, true);
                if (resyncStarted == false) {
                    throw new IllegalStateException("cannot start resync while it's already in progress");
                }
                bumpPrimaryTerm(newPrimaryTerm,
                    () -> {
                        shardStateUpdated.await();
                        assert pendingPrimaryTerm == newPrimaryTerm :
                            "shard term changed on primary. expected [" + newPrimaryTerm + "] but was [" + pendingPrimaryTerm + "]" +
                                ", current routing: " + currentRouting + ", new routing: " + newRouting;
                        assert getOperationPrimaryTerm() == newPrimaryTerm;
                        try {
                            replicationTracker.activatePrimaryMode(getLocalCheckpoint());
                            ensurePeerRecoveryRetentionLeasesExist();
                            /*
                             * If this shard was serving as a replica shard when another shard was promoted to primary then
                             * its Lucene index was reset during the primary term transition. In particular, the Lucene index
                             * on this shard was reset to the global checkpoint and the operations above the local checkpoint
                             * were reverted. If the other shard that was promoted to primary subsequently fails before the
                             * primary/replica re-sync completes successfully and we are now being promoted, we have to restore
                             * the reverted operations on this shard by replaying the translog to avoid losing acknowledged writes.
                             */
                            final Engine engine = getEngine();
                            engine.restoreLocalHistoryFromTranslog((resettingEngine, snapshot) ->
                                runTranslogRecovery(resettingEngine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {}));
                            // an index that was created before sequence numbers were introduced may contain operations in its
                            // translog that do not have a sequence numbers. We want to make sure those operations will never
                            // be replayed as part of peer recovery to avoid an arbitrary mixture of operations with seq# (due
                            // to active indexing) and operations without a seq# coming from the translog. We therefore flush
                            // to create a lucene commit point to an empty translog file.
                            assert indexSettings.getIndexVersionCreated().onOrAfter(Version.V_4_0_0) :
                                "version should be on or after 4.0.0 but it is " + indexSettings.getIndexVersionCreated();
                            /* Rolling the translog generation is not strictly needed here (as we will never have collisions between
                             * sequence numbers in a translog generation in a new primary as it takes the last known sequence number
                             * as a starting point), but it simplifies reasoning about the relationship between primary terms and
                             * translog generations.
                             */
                            engine.rollTranslogGeneration();
                            engine.fillSeqNoGaps(newPrimaryTerm);
                            replicationTracker.updateLocalCheckpoint(currentRouting.allocationId().getId(), getLocalCheckpoint());
                            primaryReplicaSyncer.accept(this, new ActionListener<ResyncTask>() {
                                @Override
                                public void onResponse(ResyncTask resyncTask) {
                                    logger.info("primary-replica resync completed with {} operations",
                                        resyncTask.getResyncedOperations());
                                    boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false);
                                    assert resyncCompleted : "primary-replica resync finished but was not started";
                                }

                                @Override
                                public void onFailure(Exception e) {
                                    boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false);
                                    assert resyncCompleted : "primary-replica resync finished but was not started";
                                    if (state == IndexShardState.CLOSED) {
                                        // ignore, shutting down
                                    } else {
                                        failShard("exception during primary-replica resync", e);
                                    }
                                }
                            });
                        } catch (final AlreadyClosedException e) {
                            // okay, the index was deleted
                        }
                    }, null);
            }
        }
        // set this last, once we finished updating all internal state.
        this.shardRouting = newRouting;

        assert this.shardRouting.primary() == false ||
            this.shardRouting.started() == false || // note that we use started and not active to avoid relocating shards
            this.indexShardOperationPermits.isBlocked() || // if permits are blocked, we are still transitioning
            this.replicationTracker.isPrimaryMode()
            : "a started primary with non-pending operation term must be in primary mode " + this.shardRouting;
        shardStateUpdated.countDown();
    }
    // Listener notifications happen outside the mutex.
    if (currentRouting.active() == false && newRouting.active()) {
        indexEventListener.afterIndexShardStarted(this);
    }
    if (newRouting.equals(currentRouting) == false) {
        indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
    }

    if (indexSettings.isSoftDeleteEnabled() && useRetentionLeasesInPeerRecovery == false && state() == IndexShardState.STARTED) {
        // Switch to retention-lease-based peer recovery once every assigned
        // copy (including relocation targets) has a peer-recovery lease.
        final RetentionLeases retentionLeases = replicationTracker.getRetentionLeases();
        final Set<ShardRouting> shardRoutings = new HashSet<>(routingTable.getShards());
        shardRoutings.addAll(routingTable.assignedShards()); // include relocation targets
        if (shardRoutings.stream().allMatch(
            shr -> shr.assignedToNode() && retentionLeases.contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(shr)))) {
            useRetentionLeasesInPeerRecovery = true;
            turnOffTranslogRetention();
        }
    }
}
    /**
     * Marks the shard as recovering based on the given recovery state; throws if the transition to
     * RECOVERING is not allowed from the current state.
     *
     * @param reason        reason for the state change, used for logging and listeners
     * @param recoveryState the recovery state to associate with this shard
     * @return the previous shard state
     * @throws IndexShardClosedException     if the shard is already closed
     * @throws IndexShardStartedException    if the shard is already started
     * @throws IndexShardRecoveringException if the shard is already recovering or in post-recovery
     */
    public IndexShardState markAsRecovering(String reason, RecoveryState recoveryState) throws IndexShardStartedException,
        IndexShardRelocatedException, IndexShardRecoveringException, IndexShardClosedException {
        synchronized (mutex) {
            if (state == IndexShardState.CLOSED) {
                throw new IndexShardClosedException(shardId);
            }
            if (state == IndexShardState.STARTED) {
                throw new IndexShardStartedException(shardId);
            }
            if (state == IndexShardState.RECOVERING) {
                throw new IndexShardRecoveringException(shardId);
            }
            // POST_RECOVERY is also treated as "already recovering" for callers of this method
            if (state == IndexShardState.POST_RECOVERY) {
                throw new IndexShardRecoveringException(shardId);
            }
            this.recoveryState = recoveryState;
            return changeState(IndexShardState.RECOVERING, reason);
        }
    }
    // true while a primary-replica resync (triggered on promotion) is running; relocation hand-off is
    // forbidden while this is set (see verifyRelocatingState)
    private final AtomicBoolean primaryReplicaResyncInProgress = new AtomicBoolean();
    /**
     * Completes the relocation. Operations are blocked and current operations are drained before changing state to relocated. The
     * provided consumer is executed after all operations are successfully blocked and receives the primary context to hand off to
     * the relocation target.
     *
     * @param targetAllocationId the allocation id of the relocation target
     * @param consumer a {@link Consumer} of the {@link ReplicationTracker.PrimaryContext}, executed after operations are blocked
     * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation
     * @throws IllegalStateException if the relocation target is no longer part of the replication group
     * @throws InterruptedException if blocking operations is interrupted
     */
    public void relocated(final String targetAllocationId,
                          final Consumer<ReplicationTracker.PrimaryContext> consumer) throws IllegalIndexShardStateException, IllegalStateException, InterruptedException {
        assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting;
        try (Releasable forceRefreshes = refreshListeners.forceRefreshes()) {
            indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
                forceRefreshes.close();
                // no shard operation permits are being held here, move state from started to relocated
                assert indexShardOperationPermits.getActiveOperationsCount() == OPERATIONS_BLOCKED :
                    "in-flight operations in progress while moving shard state to relocated";
                /*
                 * We should not invoke the runnable under the mutex as the expected implementation is to handoff the primary context via a
                 * network operation. Doing this under the mutex can implicitly block the cluster state update thread on network operations.
                 */
                verifyRelocatingState();
                final ReplicationTracker.PrimaryContext primaryContext = replicationTracker.startRelocationHandoff(targetAllocationId);
                try {
                    consumer.accept(primaryContext);
                    synchronized (mutex) {
                        // re-check: relocation may have been cancelled while the hand-off was in flight
                        verifyRelocatingState();
                        replicationTracker.completeRelocationHandoff(); // make changes to primaryMode and relocated flag only under mutex
                    }
                } catch (final Exception e) {
                    // roll back the hand-off so this shard can resume acting as primary
                    try {
                        replicationTracker.abortRelocationHandoff();
                    } catch (final Exception inner) {
                        e.addSuppressed(inner);
                    }
                    throw e;
                }
            });
        } catch (TimeoutException e) {
            logger.warn("timed out waiting for relocation hand-off to complete");
            // This is really bad as ongoing replication operations are preventing this shard from completing relocation hand-off.
            // Fail primary relocation source and target shards.
            failShard("timed out waiting for relocation hand-off to complete", null);
            throw new IndexShardClosedException(shardId(), "timed out waiting for relocation hand-off to complete");
        }
    }
    /**
     * Verifies that this shard may proceed with a relocation hand-off: it must be STARTED, its routing
     * must still say relocating, and no primary-replica resync may be in progress.
     */
    private void verifyRelocatingState() {
        if (state != IndexShardState.STARTED) {
            throw new IndexShardNotStartedException(shardId, state);
        }
        /*
         * If the master cancelled recovery, the target will be removed and the recovery will be cancelled. However, it is still possible
         * that we concurrently end up here and therefore have to protect that we do not mark the shard as relocated when its shard routing
         * says otherwise.
         */
        if (shardRouting.relocating() == false) {
            throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED,
                ": shard is no longer relocating " + shardRouting);
        }
        if (primaryReplicaResyncInProgress.get()) {
            throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED,
                ": primary relocation is forbidden while primary-replica resync is in progress " + shardRouting);
        }
    }
@Override
public IndexShardState state() {
return state;
}
    /**
     * Changes the state of the current shard. Must be called while holding {@code mutex}; notifies the
     * index event listener after the new state is installed.
     *
     * @param newState the new shard state
     * @param reason   the reason for the state change
     * @return the previous shard state
     */
    private IndexShardState changeState(IndexShardState newState, String reason) {
        assert Thread.holdsLock(mutex);
        logger.debug("state: [{}]->[{}], reason [{}]", state, newState, reason);
        IndexShardState previousState = state;
        state = newState;
        // listener is invoked under the mutex so it observes state transitions in order
        this.indexEventListener.indexShardStateChanged(this, previousState, newState, reason);
        return previousState;
    }
public Engine.IndexResult applyIndexOperationOnPrimary(long version,
VersionType versionType,
SourceToParse sourceToParse,
long ifSeqNo,
long ifPrimaryTerm,
long autoGeneratedTimestamp,
boolean isRetry) throws IOException {
assert versionType.validateVersionForWrites(version);
return applyIndexOperation(
getEngine(),
UNASSIGNED_SEQ_NO,
getOperationPrimaryTerm(),
version,
versionType,
ifSeqNo,
ifPrimaryTerm,
autoGeneratedTimestamp,
isRetry,
Engine.Operation.Origin.PRIMARY,
sourceToParse
);
}
public Engine.IndexResult applyIndexOperationOnReplica(long seqNo,
long opPrimaryTerm,
long version,
long autoGeneratedTimeStamp,
boolean isRetry,
SourceToParse sourceToParse) throws IOException {
return applyIndexOperation(
getEngine(),
seqNo,
opPrimaryTerm,
version,
null,
UNASSIGNED_SEQ_NO,
0,
autoGeneratedTimeStamp,
isRetry,
Engine.Operation.Origin.REPLICA,
sourceToParse
);
}
    /**
     * Shared implementation for primary/replica/translog index operations: parses the source into an
     * engine operation and executes it. Returns a mapping-update result (without indexing) if parsing
     * produced a dynamic mapping update, or a failed result if parsing/mapping threw.
     */
    private Engine.IndexResult applyIndexOperation(Engine engine,
                                                   long seqNo,
                                                   long opPrimaryTerm,
                                                   long version,
                                                   @Nullable VersionType versionType,
                                                   long ifSeqNo,
                                                   long ifPrimaryTerm,
                                                   long autoGeneratedTimeStamp,
                                                   boolean isRetry,
                                                   Engine.Operation.Origin origin,
                                                   SourceToParse sourceToParse) throws IOException {
        assert opPrimaryTerm <= getOperationPrimaryTerm()
            : "op term [ " + opPrimaryTerm + " ] > shard term [" + getOperationPrimaryTerm() + "]";
        ensureWriteAllowed(origin);
        Engine.Index operation;
        try {
            operation = prepareIndex(
                mapperService.documentMapper(),
                sourceToParse,
                seqNo,
                opPrimaryTerm,
                version,
                versionType,
                origin,
                autoGeneratedTimeStamp,
                isRetry,
                ifSeqNo,
                ifPrimaryTerm
            );
            // a dynamic mapping update must be applied by the caller before the doc can be indexed
            Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
            if (update != null) {
                return new Engine.IndexResult(update);
            }
        } catch (Exception e) {
            // We treat any exception during parsing and or mapping update as a document level failure
            // with the exception side effects of closing the shard. Since we don't have the shard, we
            // can not raise an exception that may block any replication of previous operations to the
            // replicas
            verifyNotClosed(e);
            return new Engine.IndexResult(e, version, opPrimaryTerm, seqNo);
        }
        return index(engine, operation);
    }
public static Engine.Index prepareIndex(DocumentMapper docMapper,
SourceToParse source,
long seqNo,
long primaryTerm,
long version,
VersionType versionType,
Engine.Operation.Origin origin,
long autoGeneratedIdTimestamp,
boolean isRetry,
long ifSeqNo,
long ifPrimaryTerm) {
long startTime = System.nanoTime();
ParsedDocument doc = docMapper.parse(source);
Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(doc.id()));
return new Engine.Index(uid, doc, seqNo, primaryTerm, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry,
ifSeqNo, ifPrimaryTerm);
}
    /**
     * Executes a prepared index operation against the current engine, converting any exception into a
     * document-level failure result (unless the shard has been closed).
     */
    public Engine.IndexResult index(Engine.Index index) throws IOException {
        assert index.primaryTerm() <= getOperationPrimaryTerm()
            : "op term [ " + index.primaryTerm() + " ] > shard term [" + getOperationPrimaryTerm() + "]";
        ensureWriteAllowed(index.origin());
        try {
            return index(getEngine(), index);
        } catch (Exception e) {
            // We treat any exception during parsing and or mapping update as a document level failure
            // with the exception side effects of closing the shard. Since we don't have the shard, we
            // can not raise an exception that may block any replication of previous operations to the
            // replicas
            verifyNotClosed(e);
            return new Engine.IndexResult(e, index.version(), index.primaryTerm(), index.seqNo());
        }
    }
    /**
     * Runs the index operation through the indexing-operation listeners and the given engine. The
     * pre/post listener calls bracket the engine call; on failure the failure variant of postIndex is
     * invoked before the exception is rethrown.
     */
    public Engine.IndexResult index(Engine engine, Engine.Index index) throws IOException {
        // mark the shard as actively indexing so inactive-shard housekeeping does not kick in
        active.set(true);
        final Engine.IndexResult result;
        index = indexingOperationListeners.preIndex(shardId, index);
        boolean traceEnabled = logger.isTraceEnabled();
        try {
            if (traceEnabled) {
                // don't use index.source().utf8ToString() here source might not be valid UTF-8
                logger.trace(
                    "index [{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}]",
                    index.id(),
                    index.seqNo(),
                    routingEntry().allocationId(),
                    index.primaryTerm(),
                    getOperationPrimaryTerm(),
                    index.origin());
            }
            result = engine.index(index);
            if (traceEnabled) {
                logger.trace(
                    "index-done [{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}] " +
                        "result-seq# [{}] result-term [{}] failure [{}]",
                    index.id(),
                    index.seqNo(),
                    routingEntry().allocationId(),
                    index.primaryTerm(),
                    getOperationPrimaryTerm(),
                    index.origin(),
                    result.getSeqNo(),
                    result.getTerm(),
                    result.getFailure());
            }
        } catch (Exception e) {
            if (traceEnabled) {
                logger.trace(new ParameterizedMessage(
                    "index-fail [{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}]",
                    index.id(),
                    index.seqNo(),
                    routingEntry().allocationId(),
                    index.primaryTerm(),
                    getOperationPrimaryTerm(),
                    index.origin()
                ), e);
            }
            // notify listeners of the failure before propagating it
            indexingOperationListeners.postIndex(shardId, index, e);
            throw e;
        }
        indexingOperationListeners.postIndex(shardId, index, result);
        return result;
    }
public Engine.NoOpResult markSeqNoAsNoop(long seqNo, long opPrimaryTerm, String reason) throws IOException {
return markSeqNoAsNoop(getEngine(), seqNo, opPrimaryTerm, reason, Engine.Operation.Origin.REPLICA);
}
private Engine.NoOpResult markSeqNoAsNoop(Engine engine, long seqNo, long opPrimaryTerm, String reason,
Engine.Operation.Origin origin) throws IOException {
assert opPrimaryTerm <= getOperationPrimaryTerm()
: "op term [ " + opPrimaryTerm + " ] > shard term [" + getOperationPrimaryTerm() + "]";
long startTime = System.nanoTime();
ensureWriteAllowed(origin);
final Engine.NoOp noOp = new Engine.NoOp(seqNo, opPrimaryTerm, origin, startTime, reason);
return noOp(engine, noOp);
}
private Engine.NoOpResult noOp(Engine engine, Engine.NoOp noOp) throws IOException {
active.set(true);
if (logger.isTraceEnabled()) {
logger.trace("noop (seq# [{}])", noOp.seqNo());
}
return engine.noOp(noOp);
}
public Engine.IndexResult getFailedIndexResult(Exception e, long version) {
return new Engine.IndexResult(e, version);
}
public Engine.DeleteResult applyDeleteOperationOnPrimary(long version,
String id,
VersionType versionType,
long ifSeqNo,
long ifPrimaryTerm) throws IOException {
return applyDeleteOperation(
getEngine(),
UNASSIGNED_SEQ_NO,
getOperationPrimaryTerm(),
version,
id,
versionType,
ifSeqNo,
ifPrimaryTerm,
Engine.Operation.Origin.PRIMARY
);
}
public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo,
long opPrimaryTerm,
long version,
String id) throws IOException {
return applyDeleteOperation(
getEngine(),
seqNo,
opPrimaryTerm,
version,
id,
null,
UNASSIGNED_SEQ_NO,
0,
Engine.Operation.Origin.REPLICA
);
}
    /**
     * Shared implementation for primary/replica/translog deletes: builds the uid term for the document
     * id, prepares an engine delete operation and executes it.
     */
    private Engine.DeleteResult applyDeleteOperation(Engine engine,
                                                     long seqNo,
                                                     long opPrimaryTerm,
                                                     long version,
                                                     String id,
                                                     @Nullable VersionType versionType,
                                                     long ifSeqNo,
                                                     long ifPrimaryTerm,
                                                     Engine.Operation.Origin origin) throws IOException {
        assert opPrimaryTerm <= getOperationPrimaryTerm()
            : "op term [ " + opPrimaryTerm + " ] > shard term [" + getOperationPrimaryTerm() + "]";
        ensureWriteAllowed(origin);
        final Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(id));
        final Engine.Delete delete = prepareDelete(
            id,
            uid,
            seqNo,
            opPrimaryTerm,
            version,
            versionType,
            origin,
            ifSeqNo,
            ifPrimaryTerm
        );
        return delete(engine, delete);
    }
private Engine.Delete prepareDelete(String id,
Term uid,
long seqNo,
long primaryTerm,
long version,
VersionType versionType,
Engine.Operation.Origin origin,
long ifSeqNo,
long ifPrimaryTerm) {
long startTime = System.nanoTime();
return new Engine.Delete(
id,
uid,
seqNo,
primaryTerm,
version,
versionType,
origin,
startTime,
ifSeqNo,
ifPrimaryTerm
);
}
private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException {
active.set(true);
final Engine.DeleteResult result;
delete = indexingOperationListeners.preDelete(shardId, delete);
try {
if (logger.isTraceEnabled()) {
logger.trace("delete [{}] (seq no [{}])", delete.uid().text(), delete.seqNo());
}
result = engine.delete(delete);
} catch (Exception e) {
indexingOperationListeners.postDelete(shardId, delete, e);
throw e;
}
indexingOperationListeners.postDelete(shardId, delete, result);
return result;
}
public Engine.GetResult get(Engine.Get get) {
readAllowed();
return getEngine().get(get, this::acquireSearcher);
}
/**
* Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link AlreadyClosedException}.
*/
public void refresh(String source) {
verifyNotClosed();
if (logger.isTraceEnabled()) {
logger.trace("refresh with source [{}]", source);
}
getEngine().refresh(source);
}
/**
* Returns how many bytes we are currently moving from heap to disk
*/
public long getWritingBytes() {
Engine engine = getEngineOrNull();
if (engine == null) {
return 0;
}
return engine.getWritingBytes();
}
public DocsStats docStats() {
readAllowed();
return getEngine().docStats();
}
/**
* @return {@link CommitStats}
* @throws AlreadyClosedException if shard is closed
*/
public CommitStats commitStats() {
return getEngine().commitStats();
}
/**
* @return {@link SeqNoStats}
* @throws AlreadyClosedException if shard is closed
*/
public SeqNoStats seqNoStats() {
return getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint());
}
public TranslogStats translogStats() {
return getEngine().getTranslogStats();
}
    /**
     * Returns store statistics, including the number of bytes still to recover if a recovery is in
     * progress. An I/O failure while computing the stats fails the shard.
     */
    public StoreStats storeStats() {
        try {
            // single volatile read; recoveryState may be null if no recovery has been started yet
            final RecoveryState recoveryState = this.recoveryState;
            final long bytesStillToRecover = recoveryState == null ? -1L : recoveryState.getIndex().bytesStillToRecover();
            // -1 is the "unknown" sentinel, mapped to StoreStats.UNKNOWN_RESERVED_BYTES
            return store.stats(bytesStillToRecover == -1 ? StoreStats.UNKNOWN_RESERVED_BYTES : bytesStillToRecover);
        } catch (IOException e) {
            failShard("Failing shard because of exception during storeStats", e);
            throw new ElasticsearchException("io exception while building 'store stats'", e);
        }
    }
public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
verifyNotClosed();
logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId);
return getEngine().syncFlush(syncId, expectedCommitId);
}
/**
* Executes the given flush request against the engine.
*
* @param request the flush request
* @return the commit ID
*/
public Engine.CommitId flush(FlushRequest request) {
final boolean waitIfOngoing = request.waitIfOngoing();
final boolean force = request.force();
logger.trace("flush with {}", request);
/*
* We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under
* control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer
* since we use Engine#writeIndexingBuffer for this now.
*/
verifyNotClosed();
final long time = System.nanoTime();
final Engine.CommitId commitId = getEngine().flush(force, waitIfOngoing);
flushMetric.inc(System.nanoTime() - time);
return commitId;
}
/**
* checks and removes translog files that no longer need to be retained. See
* {@link org.elasticsearch.index.translog.TranslogDeletionPolicy} for details
*/
public void trimTranslog() {
verifyNotClosed();
final Engine engine = getEngine();
engine.trimUnreferencedTranslogFiles();
}
    /**
     * Rolls the translog generation and cleans up unneeded translog files.
     */
    public void rollTranslogGeneration() {
        final Engine engine = getEngine();
        engine.rollTranslogGeneration();
    }
public void forceMerge(ForceMergeRequest forceMerge) throws IOException {
verifyActive();
if (logger.isTraceEnabled()) {
logger.trace("force merge with {}", forceMerge);
}
Engine engine = getEngine();
engine.forceMerge(
forceMerge.flush(),
forceMerge.maxNumSegments(),
forceMerge.onlyExpungeDeletes(),
forceMerge.forceMergeUUID()
);
}
    /**
     * Returns the lowest Lucene version among all live segments, falling back to the Lucene version
     * of the index-creation version when there are no segments.
     */
    public org.apache.lucene.util.Version minimumCompatibleVersion() {
        org.apache.lucene.util.Version luceneVersion = null;
        for (Segment segment : getEngine().segments(false)) {
            // keep the smaller of the two versions (onOrAfter == "current candidate >= segment")
            if (luceneVersion == null || luceneVersion.onOrAfter(segment.getVersion())) {
                luceneVersion = segment.getVersion();
            }
        }
        return luceneVersion == null ? indexSettings.getIndexVersionCreated().luceneVersion : luceneVersion;
    }
    /**
     * Creates a new {@link IndexCommit} snapshot from the currently running engine. All resources referenced by this
     * commit won't be freed until the commit / snapshot is closed.
     *
     * @param flushFirst <code>true</code> if the index should first be flushed to disk / a low level lucene commit should be executed
     * @throws IllegalIndexShardStateException if the shard is neither STARTED nor CLOSED
     */
    public Engine.IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws EngineException {
        final IndexShardState state = this.state; // one time volatile read
        // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
        if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) {
            return getEngine().acquireLastIndexCommit(flushFirst);
        } else {
            throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
        }
    }
    /**
     * Snapshots the most recent safe index commit from the currently running engine.
     * All index files referenced by this index commit won't be freed until the commit/snapshot is closed.
     *
     * @throws IllegalIndexShardStateException if the shard is neither STARTED nor CLOSED
     */
    public Engine.IndexCommitRef acquireSafeIndexCommit() throws EngineException {
        final IndexShardState state = this.state; // one time volatile read
        // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
        if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) {
            return getEngine().acquireSafeIndexCommit();
        } else {
            throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
        }
    }
    /**
     * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the index shard,
     * without having to worry about the current state of the engine and concurrent flushes.
     *
     * @throws org.apache.lucene.index.IndexNotFoundException     if no index is found in the current directory
     * @throws org.apache.lucene.index.CorruptIndexException      if the lucene index is corrupted. This can be caused by a checksum
     *                                                            mismatch or an unexpected exception when opening the index reading the
     *                                                            segments file.
     * @throws org.apache.lucene.index.IndexFormatTooOldException if the lucene index is too old to be opened.
     * @throws org.apache.lucene.index.IndexFormatTooNewException if the lucene index is too new to be opened.
     * @throws java.io.FileNotFoundException                      if one or more files referenced by a commit are not present.
     * @throws java.nio.file.NoSuchFileException                  if one or more files referenced by a commit are not present.
     */
    public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
        assert Thread.holdsLock(mutex) == false : "snapshotting store metadata under mutex";
        Engine.IndexCommitRef indexCommit = null;
        // keep the store alive while we read its metadata
        store.incRef();
        try {
            synchronized (engineMutex) {
                // if the engine is not running, we can access the store directly, but we need to make sure no one starts
                // the engine on us. If the engine is running, we can get a snapshot via the deletion policy of the engine.
                final Engine engine = getEngineOrNull();
                if (engine != null) {
                    indexCommit = engine.acquireLastIndexCommit(false);
                }
                if (indexCommit == null) {
                    return store.getMetadata(null, true);
                }
            }
            // reading the commit's metadata happens outside engineMutex; the acquired commit ref keeps its files alive
            return store.getMetadata(indexCommit.getIndexCommit());
        } finally {
            store.decRef();
            IOUtils.close(indexCommit);
        }
    }
/**
* Fails the shard and marks the shard store as corrupted if
* <code>e</code> is caused by index corruption
*/
public void failShard(String reason, @Nullable Exception e) {
// fail the engine. This will cause this shard to also be removed from the node's index service.
getEngine().failEngine(reason, e);
}
public Engine.Searcher acquireSearcher(String source) {
return acquireSearcher(source, Engine.SearcherScope.EXTERNAL);
}
private void markSearcherAccessed() {
lastSearcherAccess.lazySet(threadPool.relativeTimeInMillis());
}
private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scope) {
readAllowed();
markSearcherAccessed();
return getEngine().acquireSearcher(source, scope);
}
    /**
     * Closes the shard: transitions state to CLOSED, optionally flushes the engine, and releases the
     * engine, listeners and operation permits. Safe to call multiple times.
     *
     * @param reason      reason for the close, used for logging and listeners
     * @param flushEngine whether to flush the engine before closing it
     */
    public void close(String reason, boolean flushEngine) throws IOException {
        synchronized (engineMutex) {
            try {
                synchronized (mutex) {
                    changeState(IndexShardState.CLOSED, reason);
                }
            } finally {
                // detach the engine first so no new callers can obtain it
                final Engine engine = this.currentEngineReference.getAndSet(null);
                try {
                    if (engine != null && flushEngine) {
                        engine.flushAndClose();
                    }
                } finally {
                    // playing safe here and close the engine even if the above succeeds - close can be called multiple times
                    // Also closing refreshListeners to prevent us from accumulating any more listeners
                    IOUtils.close(engine, globalCheckpointListeners, refreshListeners, pendingReplicationActions);
                    indexShardOperationPermits.close();
                }
            }
        }
    }
public void preRecovery() {
final IndexShardState currentState = this.state; // single volatile read
if (currentState == IndexShardState.CLOSED) {
throw new IndexShardNotRecoveringException(shardId, currentState);
}
assert currentState == IndexShardState.RECOVERING : "expected a recovering shard " + shardId + " but got " + currentState;
indexEventListener.beforeIndexShardRecovery(this, indexSettings);
}
    /**
     * Completes recovery: refreshes the engine so all recovered operations become visible, marks the
     * recovery state DONE and transitions the shard to POST_RECOVERY.
     */
    public void postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
        synchronized (postRecoveryMutex) {
            // we need to refresh again to expose all operations that were index until now. Otherwise
            // we may not expose operations that were indexed with a refresh listener that was immediately
            // responded to in addRefreshListener. The refresh must happen under the same mutex used in addRefreshListener
            // and before moving this shard to POST_RECOVERY state (i.e., allow to read from this shard).
            getEngine().refresh("post_recovery");
            synchronized (mutex) {
                if (state == IndexShardState.CLOSED) {
                    throw new IndexShardClosedException(shardId);
                }
                if (state == IndexShardState.STARTED) {
                    throw new IndexShardStartedException(shardId);
                }
                recoveryState.setStage(RecoveryState.Stage.DONE);
                changeState(IndexShardState.POST_RECOVERY, reason);
            }
        }
    }
/**
* called before starting to copy index files over
*/
public void prepareForIndexRecovery() {
if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state);
}
recoveryState.setStage(RecoveryState.Stage.INDEX);
assert currentEngineReference.get() == null;
}
    /**
     * A best effort to bring up this shard to the global checkpoint using the local translog before performing a peer recovery.
     *
     * @return a sequence number that an operation-based peer recovery can start with.
     * This is the first operation after the local checkpoint of the safe commit if exists.
     */
    public long recoverLocallyUpToGlobalCheckpoint() {
        assert Thread.holdsLock(mutex) == false : "recover locally under mutex";
        if (state != IndexShardState.RECOVERING) {
            throw new IndexShardNotRecoveringException(shardId, state);
        }
        recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX);
        assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]";
        final Optional<SequenceNumbers.CommitInfo> safeCommit;
        final long globalCheckpoint;
        try {
            // read the persisted global checkpoint and locate the safe commit; any failure here means
            // we simply fall back to a full peer recovery (best effort only)
            final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
            globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
            safeCommit = store.findSafeIndexCommit(globalCheckpoint);
        } catch (org.apache.lucene.index.IndexNotFoundException e) {
            logger.trace("skip local recovery as no index commit found");
            return UNASSIGNED_SEQ_NO;
        } catch (Exception e) {
            logger.debug("skip local recovery as failed to find the safe commit", e);
            return UNASSIGNED_SEQ_NO;
        }
        try {
            maybeCheckIndex(); // check index here and won't do it again if ops-based recovery occurs
            recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
            if (safeCommit.isPresent() == false) {
                assert globalCheckpoint == UNASSIGNED_SEQ_NO :
                    "global checkpoint [" + globalCheckpoint + "] [ created version [" + indexSettings.getIndexVersionCreated() + "]";
                logger.trace("skip local recovery as no safe commit found");
                return UNASSIGNED_SEQ_NO;
            }
            assert safeCommit.get().localCheckpoint <= globalCheckpoint : safeCommit.get().localCheckpoint + " > " + globalCheckpoint;
            if (safeCommit.get().localCheckpoint == globalCheckpoint) {
                // nothing to replay: the safe commit already covers everything up to the global checkpoint
                logger.trace("skip local recovery as the safe commit is up to date; safe commit {} global checkpoint {}",
                    safeCommit.get(), globalCheckpoint);
                recoveryState.getTranslog().totalLocal(0);
                return globalCheckpoint + 1;
            }
            if (indexSettings.getIndexMetadata().getState() == IndexMetadata.State.CLOSE ||
                IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexSettings.getSettings())) {
                logger.trace("skip local recovery as the index was closed or not allowed to write; safe commit {} global checkpoint {}",
                    safeCommit.get(), globalCheckpoint);
                recoveryState.getTranslog().totalLocal(0);
                return safeCommit.get().localCheckpoint + 1;
            }
            try {
                // replay local translog ops up to the global checkpoint, tracking recovery progress
                final Engine.TranslogRecoveryRunner translogRecoveryRunner = (engine, snapshot) -> {
                    recoveryState.getTranslog().totalLocal(snapshot.totalOperations());
                    final int recoveredOps = runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY,
                        recoveryState.getTranslog()::incrementRecoveredOperations);
                    recoveryState.getTranslog().totalLocal(recoveredOps); // adjust the total local to reflect the actual count
                    return recoveredOps;
                };
                innerOpenEngineAndTranslog(() -> globalCheckpoint);
                getEngine().recoverFromTranslog(translogRecoveryRunner, globalCheckpoint);
                logger.trace("shard locally recovered up to {}", getEngine().getSeqNoStats(globalCheckpoint));
            } finally {
                // always close the temporary engine used for local recovery; peer recovery opens its own
                synchronized (engineMutex) {
                    IOUtils.close(currentEngineReference.getAndSet(null));
                }
            }
        } catch (Exception e) {
            logger.debug(new ParameterizedMessage("failed to recover shard locally up to global checkpoint {}", globalCheckpoint), e);
            return UNASSIGNED_SEQ_NO;
        }
        try {
            // we need to find the safe commit again as we should have created a new one during the local recovery
            final Optional<SequenceNumbers.CommitInfo> newSafeCommit = store.findSafeIndexCommit(globalCheckpoint);
            assert newSafeCommit.isPresent() : "no safe commit found after local recovery";
            return newSafeCommit.get().localCheckpoint + 1;
        } catch (Exception e) {
            logger.debug(new ParameterizedMessage(
                "failed to find the safe commit after recovering shard locally up to global checkpoint {}", globalCheckpoint), e);
            return UNASSIGNED_SEQ_NO;
        }
    }
public void trimOperationOfPreviousPrimaryTerms(long aboveSeqNo) {
getEngine().trimOperationsFromTranslog(getOperationPrimaryTerm(), aboveSeqNo);
}
/**
* Returns the maximum auto_id_timestamp of all append-only requests have been processed by this shard or the auto_id_timestamp received
* from the primary via {@link #updateMaxUnsafeAutoIdTimestamp(long)} at the beginning of a peer-recovery or a primary-replica resync.
*
* @see #updateMaxUnsafeAutoIdTimestamp(long)
*/
public long getMaxSeenAutoIdTimestamp() {
return getEngine().getMaxSeenAutoIdTimestamp();
}
/**
* Since operations stored in soft-deletes do not have max_auto_id_timestamp, the primary has to propagate its max_auto_id_timestamp
* (via {@link #getMaxSeenAutoIdTimestamp()} of all processed append-only requests to replicas at the beginning of a peer-recovery
* or a primary-replica resync to force a replica to disable optimization for all append-only requests which are replicated via
* replication while its retry variants are replicated via recovery without auto_id_timestamp.
* <p>
* Without this force-update, a replica can generate duplicate documents (for the same id) if it first receives
* a retry append-only (without timestamp) via recovery, then an original append-only (with timestamp) via replication.
*/
public void updateMaxUnsafeAutoIdTimestamp(long maxSeenAutoIdTimestampFromPrimary) {
getEngine().updateMaxUnsafeAutoIdTimestamp(maxSeenAutoIdTimestampFromPrimary);
}
public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin) throws IOException {
return applyTranslogOperation(getEngine(), operation, origin);
}
    /**
     * Dispatches a translog operation (index, delete or no-op) to the corresponding apply method on
     * the given engine.
     */
    private Engine.Result applyTranslogOperation(Engine engine, Translog.Operation operation,
                                                 Engine.Operation.Origin origin) throws IOException {
        // If a translog op is replayed on the primary (eg. ccr), we need to use external instead of null for its version type.
        final VersionType versionType = (origin == Engine.Operation.Origin.PRIMARY) ? VersionType.EXTERNAL : null;
        final Engine.Result result;
        switch (operation.opType()) {
            case INDEX:
                final Translog.Index index = (Translog.Index) operation;
                // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all
                // autoGeneratedID docs that are coming from the primary are updated correctly.
                result = applyIndexOperation(
                    engine,
                    index.seqNo(),
                    index.primaryTerm(),
                    index.version(),
                    versionType,
                    UNASSIGNED_SEQ_NO,
                    0,
                    index.getAutoGeneratedIdTimestamp(),
                    true,
                    origin,
                    new SourceToParse(
                        shardId.getIndexName(),
                        index.id(),
                        index.getSource(),
                        XContentType.JSON
                    )
                );
                break;
            case DELETE:
                final Translog.Delete delete = (Translog.Delete) operation;
                result = applyDeleteOperation(
                    engine,
                    delete.seqNo(),
                    delete.primaryTerm(),
                    delete.version(),
                    delete.id(),
                    versionType,
                    UNASSIGNED_SEQ_NO,
                    0,
                    origin
                );
                break;
            case NO_OP:
                final Translog.NoOp noOp = (Translog.NoOp) operation;
                result = markSeqNoAsNoop(engine, noOp.seqNo(), noOp.primaryTerm(), noOp.reason(), origin);
                break;
            default:
                throw new IllegalStateException("No operation defined for [" + operation + "]");
        }
        return result;
    }
    /**
     * Replays translog operations from the provided translog {@code snapshot} to the current engine using the given {@code origin}.
     * The callback {@code onOperationRecovered} is notified after each translog operation is replayed successfully.
     *
     * @return the number of operations successfully recovered
     */
    int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot, Engine.Operation.Origin origin,
                            Runnable onOperationRecovered) throws IOException {
        int opsRecovered = 0;
        Translog.Operation operation;
        while ((operation = snapshot.next()) != null) {
            try {
                logger.trace("[translog] recover op {}", operation);
                Engine.Result result = applyTranslogOperation(engine, operation, origin);
                switch (result.getResultType()) {
                    case FAILURE:
                        throw result.getFailure();
                    case MAPPING_UPDATE_REQUIRED:
                        // mapping updates must have been applied before translog replay
                        throw new IllegalArgumentException("unexpected mapping update: " + result.getRequiredMappingUpdate());
                    case SUCCESS:
                        break;
                    default:
                        throw new AssertionError("Unknown result type [" + result.getResultType() + "]");
                }
                opsRecovered++;
                onOperationRecovered.run();
            } catch (Exception e) {
                // TODO: Don't enable this leniency unless users explicitly opt-in
                if (origin == Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY && SQLExceptions.status(e) == RestStatus.BAD_REQUEST) {
                    // mainly for MapperParsingException and Failure to detect xcontent
                    logger.info("ignoring recovery of a corrupt translog entry", e);
                } else {
                    throw Exceptions.toRuntimeException(e);
                }
            }
        }
        return opsRecovered;
    }
/**
 * Reads the persisted global checkpoint from the translog checkpoint file (using the translog UUID
 * from the last committed segments info) and pushes it into the replication tracker.
 */
private void loadGlobalCheckpointToReplicationTracker() throws IOException {
    // we have to set it before we open an engine and recover from the translog because
    // acquiring a snapshot from the translog causes a sync which causes the global checkpoint to be pulled in,
    // and an engine can be forced to close in ctor which also causes the global checkpoint to be pulled in.
    final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
    final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
    replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint");
}
/**
 * Opens the engine on top of the existing lucene index and translog.
 * Operations from the translog will be replayed to bring lucene up to date.
 **/
public void openEngineAndRecoverFromTranslog() throws IOException {
    recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX);
    maybeCheckIndex();
    recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
    final RecoveryState.Translog translogRecoveryStats = recoveryState.getTranslog();
    // recovery runner records total op counts up-front so progress reporting is meaningful
    final Engine.TranslogRecoveryRunner translogRecoveryRunner = (engine, snapshot) -> {
        translogRecoveryStats.totalOperations(snapshot.totalOperations());
        translogRecoveryStats.totalOperationsOnStart(snapshot.totalOperations());
        return runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY,
            translogRecoveryStats::incrementRecoveredOperations);
    };
    // the global checkpoint must be known to the replication tracker before the engine opens
    loadGlobalCheckpointToReplicationTracker();
    innerOpenEngineAndTranslog(replicationTracker);
    getEngine().recoverFromTranslog(translogRecoveryRunner, Long.MAX_VALUE);
}
/**
 * Opens the engine on top of the existing lucene index and translog.
 * The translog is kept but its operations won't be replayed. Only valid during peer recovery.
 */
public void openEngineAndSkipTranslogRecovery() throws IOException {
    assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]";
    recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG);
    loadGlobalCheckpointToReplicationTracker();
    innerOpenEngineAndTranslog(replicationTracker);
    getEngine().skipTranslogRecovery();
}
/**
 * Creates and installs a new read/write engine for this shard. GC deletes are disabled until
 * recovery finishes (see {@link #finalizeRecovery()}), retention leases are loaded, and the new
 * engine is published under {@code engineMutex}.
 *
 * @param globalCheckpointSupplier supplies the global checkpoint for the new engine config
 * @throws IndexShardNotRecoveringException if the shard is not in the RECOVERING state
 */
private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) throws IOException {
    assert Thread.holdsLock(mutex) == false : "opening engine under mutex";
    if (state != IndexShardState.RECOVERING) {
        throw new IndexShardNotRecoveringException(shardId, state);
    }
    final EngineConfig config = newEngineConfig(globalCheckpointSupplier);
    // we disable deletes since we allow for operations to be executed against the shard while recovering
    // but we need to make sure we don't lose deletes until we are done recovering
    config.setEnableGcDeletes(false);
    updateRetentionLeasesOnReplica(loadRetentionLeases());
    assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false || getRetentionLeases().leases().isEmpty()
        : "expected empty set of retention leases with recovery source [" + recoveryState.getRecoverySource()
        + "] but got " + getRetentionLeases();
    synchronized (engineMutex) {
        assert currentEngineReference.get() == null : "engine is running";
        verifyNotClosed();
        // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata).
        final Engine newEngine = engineFactory.newReadWriteEngine(config);
        onNewEngine(newEngine);
        currentEngineReference.set(newEngine);
        // We set active because we are now writing operations to the engine; this way,
        // if we go idle after some time and become inactive, we still give sync'd flush a chance to run.
        active.set(true);
    }
    // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during
    // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine.
    applyEngineSettings();
    assert assertSequenceNumbersInCommit();
    recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG);
}
/**
 * Asserts that the latest Lucene commit carries the expected sequence-number related metadata:
 * a local checkpoint, a maximum sequence number, a history uuid that matches the engine's, and
 * the max unsafe auto-generated-id timestamp.
 *
 * @return always {@code true} so this can be invoked from within an {@code assert} statement
 * @throws IOException if the latest commit cannot be read from the store
 */
private boolean assertSequenceNumbersInCommit() throws IOException {
    final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData();
    assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contain a local checkpoint";
    assert userData.containsKey(SequenceNumbers.MAX_SEQ_NO) : "commit point doesn't contain a maximum sequence number";
    assert userData.containsKey(Engine.HISTORY_UUID_KEY) : "commit point doesn't contain a history uuid";
    assert userData.get(Engine.HISTORY_UUID_KEY).equals(getHistoryUUID()) : "commit point history uuid ["
        + userData.get(Engine.HISTORY_UUID_KEY) + "] is different than engine [" + getHistoryUUID() + "]";
    // as of 5.5.0, the engine stores the maxUnsafeAutoIdTimestamp in the commit point.
    // This should have baked into the commit by the primary we recover from, regardless of the index age.
    assert userData.containsKey(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) :
        "opening index which was created post 5.5.0 but " + Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID
            + " is not found in commit";
    return true;
}
/**
 * Hook invoked (under {@code engineMutex}) whenever a new engine is installed; wires the refresh
 * listeners to the new engine's translog write location.
 */
private void onNewEngine(Engine newEngine) {
    assert Thread.holdsLock(engineMutex);
    refreshListeners.setCurrentRefreshLocationSupplier(newEngine::getTranslogLastWriteLocation);
}
/**
 * Called if recovery has to be restarted after a network error / delay.
 * Closes the current engine (if any) and resets the recovery stage back to INIT.
 */
public void performRecoveryRestart() throws IOException {
    assert Thread.holdsLock(mutex) == false : "restart recovery under mutex";
    synchronized (engineMutex) {
        assert refreshListeners.pendingCount() == 0 : "we can't restart with pending listeners";
        IOUtils.close(currentEngineReference.getAndSet(null));
        resetRecoveryStage();
    }
}
/**
 * If a file-based recovery occurs, a recovery target calls this method to reset the recovery stage.
 * Only valid during a peer recovery, while no engine is installed and the shard is RECOVERING.
 */
public void resetRecoveryStage() {
    assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]";
    assert currentEngineReference.get() == null;
    if (state != IndexShardState.RECOVERING) {
        throw new IndexShardNotRecoveringException(shardId, state);
    }
    recoveryState().setStage(RecoveryState.Stage.INIT);
}
/**
 * Returns stats about ongoing recoveries, both source and target.
 */
public RecoveryStats recoveryStats() {
    return recoveryStats;
}

/**
 * Returns the current {@link RecoveryState} if this shard is recovering or has been recovering.
 * Returns null if the recovery has not yet started or shard was not recovered (created via an API).
 */
@Override
public RecoveryState recoveryState() {
    return this.recoveryState;
}
/**
 * Perform the last stages of recovery once all translog operations are done.
 * Refreshes the engine and re-enables GC deletes (disabled in {@code innerOpenEngineAndTranslog}).
 * Note that you should still call {@link #postRecovery(String)}.
 */
public void finalizeRecovery() {
    recoveryState().setStage(RecoveryState.Stage.FINALIZE);
    Engine engine = getEngine();
    engine.refresh("recovery_finalization");
    engine.config().setEnableGcDeletes(true);
}
/**
 * Returns {@code true} if this shard can ignore a recovery attempt made to it
 * (since it is already doing / has already done one).
 */
public boolean ignoreRecoveryAttempt() {
    final IndexShardState currentState = state(); // one time volatile read
    switch (currentState) {
        case POST_RECOVERY:
        case RECOVERING:
        case STARTED:
        case CLOSED:
            // recovery is either in flight or can never happen again
            return true;
        default:
            return false;
    }
}
/**
 * Verifies that reads are currently allowed on this shard.
 *
 * @throws IllegalIndexShardStateException if the current state is not in {@code READ_ALLOWED_STATES}
 */
public void readAllowed() throws IllegalIndexShardStateException {
    IndexShardState state = this.state; // one time volatile read
    if (READ_ALLOWED_STATES.contains(state) == false) {
        throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " + READ_ALLOWED_STATES.toString());
    }
}

/** returns true if the {@link IndexShardState} allows reading */
public boolean isReadAllowed() {
    return READ_ALLOWED_STATES.contains(state);
}
/**
 * Verifies that a write with the given {@code origin} is allowed in the current shard state.
 * Recovery-origin operations are only allowed while RECOVERING; other origins additionally
 * assert primary/replica/local-reset invariants before checking {@code WRITE_ALLOWED_STATES}.
 *
 * @throws IllegalIndexShardStateException if the current state does not permit the write
 */
private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException {
    IndexShardState state = this.state; // one time volatile read
    if (origin.isRecovery()) {
        if (state != IndexShardState.RECOVERING) {
            throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]");
        }
    } else {
        if (origin == Engine.Operation.Origin.PRIMARY) {
            assert assertPrimaryMode();
        } else if (origin == Engine.Operation.Origin.REPLICA) {
            assert assertReplicationTarget();
        } else {
            // LOCAL_RESET writes may only happen while all operation permits are blocked
            assert origin == Engine.Operation.Origin.LOCAL_RESET;
            assert getActiveOperationsCount() == OPERATIONS_BLOCKED
                : "locally resetting without blocking operations, active operations are [" + getActiveOperations() + "]";
        }
        if (WRITE_ALLOWED_STATES.contains(state) == false) {
            throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + WRITE_ALLOWED_STATES + ", origin [" + origin + "]");
        }
    }
}
// Always returns true so it can be used inside an `assert` statement; the real check is the inner assert.
private boolean assertPrimaryMode() {
    assert shardRouting.primary() && replicationTracker.isPrimaryMode() : "shard " + shardRouting + " is not a primary shard in primary mode";
    return true;
}

// Counterpart of assertPrimaryMode: a replication target must not be in primary mode.
private boolean assertReplicationTarget() {
    assert replicationTracker.isPrimaryMode() == false : "shard " + shardRouting + " in primary mode cannot be a replication target";
    return true;
}
/**
 * Throws {@link IndexShardClosedException} if the shard is closed.
 */
private void verifyNotClosed() throws IllegalIndexShardStateException {
    verifyNotClosed(null);
}

/**
 * Throws {@link IndexShardClosedException} if the shard is closed, attaching {@code suppressed}
 * (if non-null) to the thrown exception so the original failure is not lost.
 */
private void verifyNotClosed(Exception suppressed) throws IllegalIndexShardStateException {
    IndexShardState state = this.state; // one time volatile read
    if (state == IndexShardState.CLOSED) {
        final IllegalIndexShardStateException exc = new IndexShardClosedException(shardId, "operation only allowed when not closed");
        if (suppressed != null) {
            exc.addSuppressed(suppressed);
        }
        throw exc;
    }
}

/**
 * Throws {@link IllegalIndexShardStateException} unless the shard is STARTED.
 */
protected final void verifyActive() throws IllegalIndexShardStateException {
    IndexShardState state = this.state; // one time volatile read
    if (state != IndexShardState.STARTED) {
        throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard is active");
    }
}
/**
 * Returns number of heap bytes used by the indexing buffer for this shard, or 0 if the shard is closed
 */
public long getIndexBufferRAMBytesUsed() {
    final Engine engine = getEngineOrNull();
    if (engine != null) {
        try {
            return engine.getIndexBufferRAMBytesUsed();
        } catch (AlreadyClosedException ignored) {
            // engine was concurrently closed; fall through and report no usage
        }
    }
    return 0;
}
/**
 * Registers a callback that is invoked when this shard fails.
 */
public void addShardFailureCallback(Consumer<ShardFailure> onShardFailure) {
    this.shardEventListener.delegates.add(onShardFailure);
}

/**
 * Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last
 * indexing operation, and notify listeners that we are now inactive so e.g. sync'd flush can happen.
 */
public void checkIdle(long inactiveTimeNS) {
    Engine engineOrNull = getEngineOrNull();
    if (engineOrNull != null && System.nanoTime() - engineOrNull.getLastWriteNanos() >= inactiveTimeNS) {
        // only notify listeners on the active -> inactive transition
        boolean wasActive = active.getAndSet(false);
        if (wasActive) {
            logger.debug("shard is now inactive");
            try {
                indexEventListener.onShardInactive(this);
            } catch (Exception e) {
                logger.warn("failed to notify index event listener", e);
            }
        }
    }
}

/** Returns {@code true} if the shard has seen indexing activity recently (see {@link #checkIdle(long)}). */
public boolean isActive() {
    return active.get();
}

/** Returns the on-disk path layout of this shard. */
public ShardPath shardPath() {
    return path;
}
/**
 * Recovers this (primary) shard by copying over segments from the given local source shards,
 * e.g. as part of a local-shards recovery. The snapshots taken over the source shards are
 * released once the listener completes, or immediately if setup fails before the recovery
 * was handed off.
 *
 * @param localShards the local source shards to recover from
 * @param listener    resolved once the recovery completes (or fails)
 * @throws IOException if snapshotting a source shard fails
 */
public void recoverFromLocalShards(List<IndexShard> localShards,
                                   ActionListener<Boolean> listener) throws IOException {
    assert shardRouting.primary() : "recover from local shards only makes sense if the shard is a primary shard";
    assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: " + recoveryState.getRecoverySource();
    final List<LocalShardSnapshot> snapshots = new ArrayList<>();
    // ensure the snapshots are released no matter how the recovery listener completes
    final ActionListener<Boolean> recoveryListener = ActionListener.runBefore(listener, () -> IOUtils.close(snapshots));
    boolean success = false;
    try {
        for (IndexShard shard : localShards) {
            snapshots.add(new LocalShardSnapshot(shard));
        }
        StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
        storeRecovery.recoverFromLocalShards(this, snapshots, recoveryListener);
        success = true;
    } finally {
        if (success == false) {
            // recovery never started, so the listener (and its runBefore hook) won't fire; close here
            IOUtils.close(snapshots);
        }
    }
}
/**
 * Recovers this (primary) shard from its local store / gateway.
 */
public void recoverFromStore(ActionListener<Boolean> listener) {
    // we are the first primary, recover from the gateway
    // if its post api allocation, the index should exist
    assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
    assert shardRouting.initializing() : "can only start recovery on initializing shard";
    StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
    storeRecovery.recoverFromStore(this, listener);
}

/**
 * Restores this (primary) shard from a snapshot held in the given {@code repository};
 * failures during setup are reported through the listener rather than thrown.
 */
public void restoreFromRepository(Repository repository, ActionListener<Boolean> listener) {
    try {
        assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
        assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " +
            recoveryState.getRecoverySource();
        StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
        storeRecovery.recoverFromRepository(this, repository, listener);
    } catch (Exception e) {
        listener.onFailure(e);
    }
}
/**
 * Tests whether or not the engine should be flushed periodically.
 * This test is based on the current size of the translog compared to the configured flush threshold size.
 *
 * @return {@code true} if the engine should be flushed
 */
boolean shouldPeriodicallyFlush() {
    final Engine engine = getEngineOrNull();
    if (engine == null) {
        return false;
    }
    try {
        return engine.shouldPeriodicallyFlush();
    } catch (final AlreadyClosedException ignored) {
        // we are already closed, no need to flush or roll
        return false;
    }
}
/**
 * Tests whether or not the translog generation should be rolled to a new generation. This test is based on the size of the current
 * generation compared to the configured generation threshold size.
 *
 * @return {@code true} if the current generation should be rolled to a new generation
 */
boolean shouldRollTranslogGeneration() {
    final Engine engine = getEngineOrNull();
    if (engine == null) {
        return false;
    }
    try {
        return engine.shouldRollTranslogGeneration();
    } catch (final AlreadyClosedException ignored) {
        // we are already closed, no need to flush or roll
        return false;
    }
}
/**
 * Reacts to index settings changes: notifies listeners, swaps in a new engine if the settings
 * change selects a different engine factory (blocking all operations while resetting to the
 * global checkpoint), and finally re-applies the engine-level settings.
 *
 * @param oldSettings the settings in effect before the change
 */
public void onSettingsChanged(Settings oldSettings) {
    this.indexEventListener.beforeIndexSettingsChangesApplied(this, oldSettings, indexSettings.getSettings());
    var newEngineFactory = getEngineFactory();
    if (newEngineFactory.getClass() != engineFactory.getClass()) {
        try {
            // resetEngineToGlobalCheckpoint will use the new engine factory
            indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, this::resetEngineToGlobalCheckpoint);
        } catch (InterruptedException e) {
            // restore the interrupt status so callers further up the stack can observe it
            Thread.currentThread().interrupt();
            var msg = "Interrupted while trying to switch to a new engine";
            failShard(msg, null);
            throw new RuntimeException(msg, e);
        } catch (TimeoutException e) {
            var msg = "Timeout exception while trying to switch to a new engine";
            failShard(msg, null);
            throw new RuntimeException(msg, e);
        } catch (IOException e) {
            var msg = "IOException while trying to switch to a new engine";
            failShard(msg, e);
            throw new UncheckedIOException(msg, e);
        } catch (AlreadyClosedException e) {
            // shard was concurrently closed; nothing left to do
            return;
        }
    }
    applyEngineSettings();
}
/**
 * Pushes the current translog-retention / soft-delete settings into the engine (if one is open).
 * When soft deletes are enabled and retention leases drive peer recovery, translog retention is
 * effectively disabled (age -1, size -1).
 */
private void applyEngineSettings() {
    Engine engineOrNull = getEngineOrNull();
    if (engineOrNull != null) {
        final boolean disableTranslogRetention = indexSettings.isSoftDeleteEnabled() && useRetentionLeasesInPeerRecovery;
        engineOrNull.onSettingsChanged(
            disableTranslogRetention ? TimeValue.MINUS_ONE : indexSettings.getTranslogRetentionAge(),
            disableTranslogRetention ? new ByteSizeValue(-1) : indexSettings.getTranslogRetentionSize(),
            indexSettings.getSoftDeleteRetentionOperations()
        );
    }
}
/**
 * Asynchronously disables translog retention (via {@link #applyEngineSettings()}) and trims the
 * translog, once the replication group relies exclusively on retention leases for peer recovery.
 */
private void turnOffTranslogRetention() {
    logger.debug("turn off the translog retention for the replication group {} " +
        "as it starts using retention leases exclusively in peer recoveries", shardId);
    // Off to the generic threadPool as pruning the delete tombstones can be expensive.
    threadPool.generic().execute(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            // a closed shard makes this failure expected; don't spam the log
            if (state != IndexShardState.CLOSED) {
                logger.warn("failed to turn off translog retention", e);
            }
        }

        @Override
        protected void doRun() {
            applyEngineSettings();
            trimTranslog();
        }
    });
}
/**
 * Acquires a lock on the translog files and Lucene soft-deleted documents to prevent them from being trimmed
 */
public Closeable acquireHistoryRetentionLock(Engine.HistorySource source) {
    return getEngine().acquireHistoryRetentionLock(source);
}

/**
 * Returns the estimated number of history operations whose seq# at least the provided seq# in this shard.
 */
public int estimateNumberOfHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException {
    return getEngine().estimateNumberOfHistoryOperations(reason, source, startingSeqNo);
}

/**
 * Creates a new history snapshot for reading operations since the provided starting seqno (inclusive).
 * The returned snapshot can be retrieved from either Lucene index or translog files.
 */
public Translog.Snapshot getHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException {
    return getEngine().readHistoryOperations(reason, source, startingSeqNo);
}

/**
 * Checks if we have a complete history of operations since the given starting seqno (inclusive).
 * This method should be called after acquiring the retention lock; See {@link #acquireHistoryRetentionLock(Engine.HistorySource)}
 */
public boolean hasCompleteHistoryOperations(String reason, Engine.HistorySource source, long startingSeqNo) throws IOException {
    return getEngine().hasCompleteOperationHistory(reason, source, startingSeqNo);
}

/**
 * Gets the minimum retained sequence number for this engine.
 *
 * @return the minimum retained sequence number
 */
public long getMinRetainedSeqNo() {
    return getEngine().getMinRetainedSeqNo();
}

/**
 * Creates a new changes snapshot for reading operations whose seq_no are between {@code fromSeqNo}(inclusive)
 * and {@code toSeqNo}(inclusive). The caller has to close the returned snapshot after finishing the reading.
 *
 * @param source            the source of the request
 * @param fromSeqNo         the from seq_no (inclusive) to read
 * @param toSeqNo           the to seq_no (inclusive) to read
 * @param requiredFullRange if {@code true} then {@link Translog.Snapshot#next()} will throw {@link IllegalStateException}
 *                          if any operation between {@code fromSeqNo} and {@code toSeqNo} is missing.
 *                          This parameter should be only enabled when the entire requesting range is below the global checkpoint.
 */
public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
    return getEngine().newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange);
}
/** Returns the Lucene segments of this shard's engine; {@code verbose} includes extra detail. */
public List<Segment> segments(boolean verbose) {
    return getEngine().segments(verbose);
}

/** Returns the history uuid stored by the current engine. */
public String getHistoryUUID() {
    return getEngine().getHistoryUUID();
}

public IndexEventListener getIndexEventListener() {
    return indexEventListener;
}

/** Enables indexing throttling on the engine; a concurrently closed engine is ignored. */
public void activateThrottling() {
    try {
        getEngine().activateThrottling();
    } catch (AlreadyClosedException ex) {
        // ignore
    }
}

/** Disables indexing throttling on the engine; a concurrently closed engine is ignored. */
public void deactivateThrottling() {
    try {
        getEngine().deactivateThrottling();
    } catch (AlreadyClosedException ex) {
        // ignore
    }
}
/**
 * Handles an exception thrown while refreshing / writing the indexing buffer: exceptions caused
 * by the shard closing or the node shutting down are silently ignored; anything else is logged
 * (unless the shard is already closed).
 */
private void handleRefreshException(Exception e) {
    if (e instanceof AlreadyClosedException) {
        // shard is closed; nothing to report
        return;
    }
    if (e instanceof RefreshFailedEngineException) {
        final Throwable cause = e.getCause();
        if (cause instanceof InterruptedException
            || cause instanceof ClosedByInterruptException
            || cause instanceof ThreadInterruptedException) {
            // ignore, we are being shutdown
            return;
        }
    }
    if (state != IndexShardState.CLOSED) {
        logger.warn("Failed to perform engine refresh", e);
    }
}
/**
 * Called when our shard is using too much heap and should move buffered indexed/deleted documents to disk.
 * Refresh-related failures are routed through {@link #handleRefreshException(Exception)}.
 */
public void writeIndexingBuffer() {
    try {
        Engine engine = getEngine();
        engine.writeIndexingBuffer();
    } catch (Exception e) {
        handleRefreshException(e);
    }
}
/**
 * Notifies the service to update the local checkpoint for the shard with the provided allocation ID. See
 * {@link ReplicationTracker#updateLocalCheckpoint(String, long)} for
 * details.
 *
 * @param allocationId the allocation ID of the shard to update the local checkpoint for
 * @param checkpoint   the local checkpoint for the shard
 */
public void updateLocalCheckpointForShard(final String allocationId, final long checkpoint) {
    assert assertPrimaryMode();
    verifyNotClosed();
    replicationTracker.updateLocalCheckpoint(allocationId, checkpoint);
}

/**
 * Update the local knowledge of the persisted global checkpoint for the specified allocation ID.
 *
 * @param allocationId     the allocation ID to update the global checkpoint for
 * @param globalCheckpoint the global checkpoint
 */
public void updateGlobalCheckpointForShard(final String allocationId, final long globalCheckpoint) {
    assert assertPrimaryMode();
    verifyNotClosed();
    replicationTracker.updateGlobalCheckpointForShard(allocationId, globalCheckpoint);
}
/**
 * Add a global checkpoint listener. If the global checkpoint is equal to or above the global checkpoint the listener is waiting for,
 * then the listener will be notified immediately via an executor (so possibly not on the current thread). If the specified timeout
 * elapses before the listener is notified, the listener will be notified with a {@link TimeoutException}. A caller may pass null to
 * specify no timeout.
 *
 * @param waitingForGlobalCheckpoint the global checkpoint the listener is waiting for
 * @param listener                   the listener
 * @param timeout                    the timeout
 */
public void addGlobalCheckpointListener(
    final long waitingForGlobalCheckpoint,
    final GlobalCheckpointListeners.GlobalCheckpointListener listener,
    final TimeValue timeout) {
    this.globalCheckpointListeners.add(waitingForGlobalCheckpoint, listener, timeout);
}

/**
 * Fails (assert + {@link IllegalStateException}) if the index does not have soft deletes enabled;
 * used to guard features that require soft deletes, such as retention leases.
 */
private void ensureSoftDeletesEnabled(String feature) {
    if (indexSettings.isSoftDeleteEnabled() == false) {
        String message = feature + " requires soft deletes but " + indexSettings.getIndex() + " does not have soft deletes enabled";
        assert false : message;
        throw new IllegalStateException(message);
    }
}
/**
 * Get all non-expired retention leases tracked on this shard.
 *
 * @return the retention leases
 */
public RetentionLeases getRetentionLeases() {
    return getRetentionLeases(false).v2();
}

/**
 * If the expire leases parameter is false, gets all retention leases tracked on this shard and otherwise first calculates
 * expiration of existing retention leases, and then gets all non-expired retention leases tracked on this shard. Note that only the
 * primary shard calculates which leases are expired, and if any have expired, syncs the retention leases to any replicas. If the
 * expire leases parameter is true, this replication tracker must be in primary mode.
 *
 * @return a tuple indicating whether or not any retention leases were expired, and the non-expired retention leases
 */
public Tuple<Boolean, RetentionLeases> getRetentionLeases(final boolean expireLeases) {
    assert expireLeases == false || assertPrimaryMode();
    verifyNotClosed();
    return replicationTracker.getRetentionLeases(expireLeases);
}

/** Returns the retention lease stats for this shard (wraps the current non-expired leases). */
public RetentionLeaseStats getRetentionLeaseStats() {
    verifyNotClosed();
    return new RetentionLeaseStats(getRetentionLeases());
}
/**
 * Adds a new retention lease.
 *
 * @param id                      the identifier of the retention lease
 * @param retainingSequenceNumber the retaining sequence number, or {@code RETAIN_ALL} to retain from the minimum retained seq_no
 * @param source                  the source of the retention lease
 * @param listener                the callback when the retention lease is successfully added and synced to replicas
 * @return the new retention lease
 * @throws IllegalArgumentException if the specified retention lease already exists
 */
public RetentionLease addRetentionLease(
    final String id,
    final long retainingSequenceNumber,
    final String source,
    final ActionListener<ReplicationResponse> listener) {
    Objects.requireNonNull(listener);
    assert assertPrimaryMode();
    verifyNotClosed();
    ensureSoftDeletesEnabled("retention leases");
    // hold the history lock so the minimum retained seq_no cannot advance while we resolve RETAIN_ALL
    try (Closeable ignore = acquireHistoryRetentionLock(Engine.HistorySource.INDEX)) {
        final long actualRetainingSequenceNumber =
            retainingSequenceNumber == RETAIN_ALL ? getMinRetainedSeqNo() : retainingSequenceNumber;
        return replicationTracker.addRetentionLease(id, actualRetainingSequenceNumber, source, listener);
    } catch (final IOException e) {
        throw new AssertionError(e);
    }
}

/**
 * Renews an existing retention lease.
 *
 * @param id                      the identifier of the retention lease
 * @param retainingSequenceNumber the retaining sequence number, or {@code RETAIN_ALL} to retain from the minimum retained seq_no
 * @param source                  the source of the retention lease
 * @return the renewed retention lease
 * @throws IllegalArgumentException if the specified retention lease does not exist
 */
public RetentionLease renewRetentionLease(final String id, final long retainingSequenceNumber, final String source) {
    assert assertPrimaryMode();
    verifyNotClosed();
    ensureSoftDeletesEnabled("retention leases");
    // hold the history lock so the minimum retained seq_no cannot advance while we resolve RETAIN_ALL
    try (Closeable ignore = acquireHistoryRetentionLock(Engine.HistorySource.INDEX)) {
        final long actualRetainingSequenceNumber =
            retainingSequenceNumber == RETAIN_ALL ? getMinRetainedSeqNo() : retainingSequenceNumber;
        return replicationTracker.renewRetentionLease(id, actualRetainingSequenceNumber, source);
    } catch (final IOException e) {
        throw new AssertionError(e);
    }
}
/**
 * Removes an existing retention lease.
 *
 * @param id       the identifier of the retention lease
 * @param listener the callback when the retention lease is successfully removed and synced to replicas
 */
public void removeRetentionLease(final String id, final ActionListener<ReplicationResponse> listener) {
    Objects.requireNonNull(listener);
    assert assertPrimaryMode();
    verifyNotClosed();
    ensureSoftDeletesEnabled("retention leases");
    replicationTracker.removeRetentionLease(id, listener);
}

/**
 * Updates retention leases on a replica.
 *
 * @param retentionLeases the retention leases
 */
public void updateRetentionLeasesOnReplica(final RetentionLeases retentionLeases) {
    assert assertReplicationTarget();
    verifyNotClosed();
    replicationTracker.updateRetentionLeasesOnReplica(retentionLeases);
}

/**
 * Loads the latest retention leases from their dedicated state file.
 *
 * @return the retention leases
 * @throws IOException if an I/O exception occurs reading the retention leases
 */
public RetentionLeases loadRetentionLeases() throws IOException {
    verifyNotClosed();
    return replicationTracker.loadRetentionLeases(path.getShardStatePath());
}

/**
 * Persists the current retention leases to their dedicated state file.
 *
 * @throws WriteStateException if an exception occurs writing the state file
 */
public void persistRetentionLeases() throws WriteStateException {
    verifyNotClosed();
    replicationTracker.persistRetentionLeases(path.getShardStatePath());
}

// Always used inside an `assert`; delegates the check to the replication tracker.
public boolean assertRetentionLeasesPersisted() throws IOException {
    return replicationTracker.assertRetentionLeasesPersisted(path.getShardStatePath());
}
/**
 * Syncs the current retention leases to all replicas. Renews the peer-recovery retention leases
 * first; if the expiration check expired any leases, a full (foreground) sync is performed,
 * otherwise a background sync.
 */
public void syncRetentionLeases() {
    assert assertPrimaryMode();
    verifyNotClosed();
    replicationTracker.renewPeerRecoveryRetentionLeases();
    final Tuple<Boolean, RetentionLeases> retentionLeases = getRetentionLeases(true);
    if (retentionLeases.v1()) {
        // at least one lease expired: push the pruned set to replicas right away
        logger.trace("syncing retention leases [{}] after expiration check", retentionLeases.v2());
        retentionLeaseSyncer.sync(
            shardId,
            shardRouting.allocationId().getId(),
            getPendingPrimaryTerm(),
            retentionLeases.v2(),
            ActionListener.wrap(
                r -> {},
                e -> logger.warn(
                    new ParameterizedMessage(
                        "failed to sync retention leases [{}] after expiration check", retentionLeases),
                    e
                )
            )
        );
    } else {
        logger.trace("background syncing retention leases [{}] after expiration check", retentionLeases.v2());
        retentionLeaseSyncer.backgroundSync(
            shardId, shardRouting.allocationId().getId(), getPendingPrimaryTerm(), retentionLeases.v2());
    }
}
/**
 * Called when the recovery process for a shard has opened the engine on the target shard. Ensures that the right data structures
 * have been set up locally to track local checkpoint information for the shard and that the shard is added to the replication group.
 *
 * @param allocationId the allocation ID of the shard for which recovery was initiated
 */
public void initiateTracking(final String allocationId) {
    assert assertPrimaryMode();
    replicationTracker.initiateTracking(allocationId);
}

/**
 * Marks the shard with the provided allocation ID as in-sync with the primary shard. See
 * {@link ReplicationTracker#markAllocationIdAsInSync(String, long)}
 * for additional details.
 *
 * @param allocationId    the allocation ID of the shard to mark as in-sync
 * @param localCheckpoint the current local checkpoint on the shard
 */
public void markAllocationIdAsInSync(final String allocationId, final long localCheckpoint) throws InterruptedException {
    assert assertPrimaryMode();
    replicationTracker.markAllocationIdAsInSync(allocationId, localCheckpoint);
}
/**
 * Returns the persisted local checkpoint for the shard.
 *
 * @return the local checkpoint
 */
public long getLocalCheckpoint() {
    return getEngine().getPersistedLocalCheckpoint();
}

/**
 * Returns the global checkpoint for the shard as known to the replication tracker.
 *
 * @return the global checkpoint
 */
public long getLastKnownGlobalCheckpoint() {
    return replicationTracker.getGlobalCheckpoint();
}

/**
 * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint)
 */
public long getLastSyncedGlobalCheckpoint() {
    return getEngine().getLastSyncedGlobalCheckpoint();
}

/**
 * Get the local knowledge of the persisted global checkpoints for all in-sync allocation IDs.
 *
 * @return a map from allocation ID to the local knowledge of the global checkpoint for that allocation ID
 */
public ObjectLongMap<String> getInSyncGlobalCheckpoints() {
    assert assertPrimaryMode();
    verifyNotClosed();
    return replicationTracker.getInSyncGlobalCheckpoints();
}
/**
 * Syncs the global checkpoint to the replicas if the global checkpoint on at least one replica is behind the global checkpoint on the
 * primary.
 *
 * @param reason the reason for the sync, used for trace logging only
 */
public void maybeSyncGlobalCheckpoint(final String reason) {
    verifyNotClosed();
    assert shardRouting.primary() : "only call maybeSyncGlobalCheckpoint on primary shard";
    if (replicationTracker.isPrimaryMode() == false) {
        return;
    }
    assert assertPrimaryMode();
    // only sync if there are no operations in flight, or when using async durability
    final SeqNoStats stats = getEngine().getSeqNoStats(replicationTracker.getGlobalCheckpoint());
    final boolean asyncDurability = indexSettings().getTranslogDurability() == Translog.Durability.ASYNC;
    if (stats.getMaxSeqNo() == stats.getGlobalCheckpoint() || asyncDurability) {
        final ObjectLongMap<String> globalCheckpoints = getInSyncGlobalCheckpoints();
        final long globalCheckpoint = replicationTracker.getGlobalCheckpoint();
        // async durability means that the local checkpoint might lag (as it is only advanced on fsync)
        // periodically ask for the newest local checkpoint by syncing the global checkpoint, so that ultimately the global
        // checkpoint can be synced. Also take into account that a shard might be pending sync, which means that it isn't
        // in the in-sync set just yet but might be blocked on waiting for its persisted local checkpoint to catch up to
        // the global checkpoint.
        final boolean syncNeeded =
            (asyncDurability && (stats.getGlobalCheckpoint() < stats.getMaxSeqNo() || replicationTracker.pendingInSync()))
                // check if the persisted global checkpoint
                || StreamSupport
                .stream(globalCheckpoints.values().spliterator(), false)
                .anyMatch(v -> v.value < globalCheckpoint);
        // only sync if index is not closed and there is a shard lagging the primary
        if (syncNeeded && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN) {
            logger.trace("syncing global checkpoint for [{}]", reason);
            globalCheckpointSyncer.run();
        }
    }
}
/**
 * Returns the current replication group for the shard.
 *
 * <p>Every time the group is exposed, the pending replication actions are refreshed with
 * the newest group to prevent races between the two (they are interdependent).
 *
 * @return the replication group
 */
public ReplicationGroup getReplicationGroup() {
    assert assertPrimaryMode();
    verifyNotClosed();
    final ReplicationGroup group = replicationTracker.getReplicationGroup();
    // keep PendingReplicationActions in lock-step with the group we hand out
    pendingReplicationActions.accept(group);
    return group;
}
/**
 * Returns the pending replication actions for the shard. Only valid in primary mode.
 *
 * @return the pending replication actions
 */
public PendingReplicationActions getPendingReplicationActions() {
    assert assertPrimaryMode();
    verifyNotClosed();
    return this.pendingReplicationActions;
}
/**
 * Updates the global checkpoint on a replica shard after it has been updated by the primary.
 *
 * @param globalCheckpoint the global checkpoint
 * @param reason           the reason the global checkpoint was updated
 */
public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final String reason) {
    assert assertReplicationTarget();
    final long localCheckpoint = getLocalCheckpoint();
    // a global checkpoint ahead of our local checkpoint can only legitimately happen mid-recovery; see below
    if (globalCheckpoint > localCheckpoint) {
        /*
         * This can happen during recovery when the shard has started its engine but recovery is not finalized and is receiving global
         * checkpoint updates. However, since this shard is not yet contributing to calculating the global checkpoint, it can be the
         * case that the global checkpoint update from the primary is ahead of the local checkpoint on this shard. In this case, we
         * ignore the global checkpoint update. This can happen if we are in the translog stage of recovery. Prior to this, the engine
         * is not opened and this shard will not receive global checkpoint updates, and after this the shard will be contributing to
         * calculations of the global checkpoint. However, we can not assert that we are in the translog stage of recovery here as
         * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move
         * to recovery finalization, or even finished recovery before the update arrives here.
         */
        assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED :
            "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " +
                "that is higher than its local checkpoint [" + localCheckpoint + "]";
        // silently drop the update; the shard is not yet contributing to checkpoint calculations
        return;
    }
    replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, reason);
}
/**
 * Updates the known allocation IDs and the local checkpoints for the corresponding allocations from a primary relocation source.
 *
 * @param primaryContext the sequence number context handed over by the relocation source
 */
public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) {
    // only a primary relocation target may activate from a handed-over primary context
    assert shardRouting.primary() && shardRouting.isRelocationTarget() :
        "only primary relocation target can update allocation IDs from primary context: " + shardRouting;
    assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) :
        "primary context [" + primaryContext + "] does not contain relocation target [" + routingEntry() + "]";
    // with async durability the persisted local checkpoint may legitimately lag the context's view
    assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId())
        .getLocalCheckpoint() || indexSettings().getTranslogDurability() == Translog.Durability.ASYNC :
        "local checkpoint [" + getLocalCheckpoint() + "] does not match checkpoint from primary context [" + primaryContext + "]";
    synchronized (mutex) {
        replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex
    }
    ensurePeerRecoveryRetentionLeasesExist();
}
/**
 * Asynchronously (on the generic thread pool) asks the replication tracker to create any
 * missing peer-recovery retention leases. Success is traced, failure is logged at debug —
 * this is best-effort and never fails the shard.
 */
private void ensurePeerRecoveryRetentionLeasesExist() {
    threadPool.generic().execute(() ->
        replicationTracker.createMissingPeerRecoveryRetentionLeases(
            ActionListener.wrap(
                ignored -> logger.trace("created missing peer recovery retention leases"),
                e -> logger.debug("failed creating missing peer recovery retention leases", e))));
}
/**
 * Check if there are any recoveries pending in-sync.
 *
 * @return {@code true} if there is at least one shard pending in-sync, otherwise {@code false}
 */
public boolean pendingInSync() {
    assert assertPrimaryMode();
    final boolean pending = replicationTracker.pendingInSync();
    return pending;
}
/**
 * Moves recovery into the VERIFY_INDEX stage and, if index checking is enabled on startup
 * (either a truthy value or the special {@code "checksum"} mode), runs the check.
 * A failing check aborts recovery.
 */
public void maybeCheckIndex() {
    recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX);
    final boolean checkEnabled = Booleans.isTrue(checkIndexOnStartup) || "checksum".equals(checkIndexOnStartup);
    if (checkEnabled == false) {
        return;
    }
    try {
        checkIndex();
    } catch (IOException ex) {
        throw new RecoveryFailedException(recoveryState, "check index failed", ex);
    }
}
/**
 * Runs the index check while holding a reference on the store; if the store cannot be
 * referenced (already closing) the check is silently skipped. On an {@link IOException}
 * the store is marked corrupted before the exception is rethrown.
 */
void checkIndex() throws IOException {
    if (store.tryIncRef() == false) {
        return; // store is closing; nothing to check
    }
    try {
        doCheckIndex();
    } catch (IOException e) {
        store.markStoreCorrupted(e);
        throw e;
    } finally {
        store.decRef();
    }
}
/**
 * Performs the actual index verification. In {@code "checksum"} mode only file checksums of
 * the latest commit are verified; otherwise a full Lucene {@code CheckIndex} is run.
 * Output is captured in-memory and logged (warn on failure, debug on success), and the
 * elapsed time is recorded in the recovery state.
 */
private void doCheckIndex() throws IOException {
    final long timeNS = System.nanoTime();
    // nothing to verify if no index exists yet (e.g. empty shard)
    if (!Lucene.indexExists(store.directory())) {
        return;
    }
    BytesStreamOutput os = new BytesStreamOutput();
    PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
    if ("checksum".equals(checkIndexOnStartup)) {
        // physical verification only: verify all checksums for the latest commit
        IOException corrupt = null;
        MetadataSnapshot metadata = snapshotStoreMetadata();
        for (Map.Entry<String, StoreFileMetadata> entry : metadata.asMap().entrySet()) {
            try {
                Store.checkIntegrity(entry.getValue(), store.directory());
                out.println("checksum passed: " + entry.getKey());
            } catch (IOException exc) {
                // remember the failure but keep checking the remaining files for a full report
                out.println("checksum failed: " + entry.getKey());
                exc.printStackTrace(out);
                corrupt = exc;
            }
        }
        out.flush();
        if (corrupt != null) {
            logger.warn("check index [failure]\n{}", os.bytes().utf8ToString());
            throw corrupt;
        }
    } else {
        // full checkindex
        final CheckIndex.Status status = store.checkIndex(out);
        out.flush();
        if (!status.clean) {
            if (state == IndexShardState.CLOSED) {
                // ignore if closed....
                return;
            }
            logger.warn("check index [failure]\n{}", os.bytes().utf8ToString());
            throw new IOException("index check failure");
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug("check index [success]\n{}", os.bytes().utf8ToString());
    }
    // record wall-clock time spent verifying (clamped to be non-negative)
    recoveryState.getVerifyIndex().checkIndexTime(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - timeNS)));
}
/**
 * Returns the current engine, failing fast if the shard has no open engine.
 *
 * @return the current engine, never {@code null}
 * @throws AlreadyClosedException if the engine is closed or not yet started
 */
Engine getEngine() {
    final Engine current = getEngineOrNull();
    if (current != null) {
        return current;
    }
    throw new AlreadyClosedException("engine is closed");
}
/**
 * NOTE: returns {@code null} if the engine is not yet started (e.g. recovery phase 1, copying
 * over index files, is still running), or if the engine is closed.
 */
public Engine getEngineOrNull() {
    return currentEngineReference.get();
}
/**
 * Kicks off recovery of this shard according to the recovery source recorded in the given
 * {@code recoveryState}: from an (empty or existing) local store, from a peer, from a
 * snapshot repository, or from local shards (shrink/split/clone resize).
 *
 * @param recoveryState         the recovery state; its source must match this shard's routing
 * @param recoveryTargetService used for peer recovery only
 * @param recoveryListener      notified of recovery completion or failure
 * @param repositoriesService   used to resolve the repository for snapshot recovery
 * @param indicesService        used to resolve the source index for local-shards recovery
 */
public void startRecovery(RecoveryState recoveryState,
                          PeerRecoveryTargetService recoveryTargetService,
                          PeerRecoveryTargetService.RecoveryListener recoveryListener,
                          RepositoriesService repositoriesService,
                          IndicesService indicesService) {
    // TODO: Create a proper object to encapsulate the recovery context
    // all of the current methods here follow a pattern of:
    // resolve context which isn't really dependent on the local shards and then async
    // call some external method with this pointer.
    // with a proper recovery context object we can simply change this to:
    // startRecovery(RecoveryState recoveryState, ShardRecoverySource source ) {
    //     markAsRecovery("from " + source.getShortDescription(), recoveryState);
    //     threadPool.generic().execute()  {
    //           onFailure () { listener.failure() };
    //           doRun() {
    //                if (source.recover(this)) {
    //                  recoveryListener.onRecoveryDone(recoveryState);
    //                }
    //           }
    //     }}
    // }
    assert recoveryState.getRecoverySource().equals(shardRouting.recoverySource());
    switch (recoveryState.getRecoverySource().getType()) {
        case EMPTY_STORE:
        case EXISTING_STORE:
            // both cases recover from whatever is (or isn't) on local disk
            executeRecovery("from store", recoveryState, recoveryListener, this::recoverFromStore);
            break;
        case PEER:
            try {
                markAsRecovering("from " + recoveryState.getSourceNode(), recoveryState);
                recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener);
            } catch (Exception e) {
                // failure to even start peer recovery indicates a broken local state
                failShard("corrupted preexisting index", e);
                recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
            }
            break;
        case SNAPSHOT:
            final String repo = ((SnapshotRecoverySource) recoveryState.getRecoverySource()).snapshot().getRepository();
            executeRecovery(
                "from snapshot",
                recoveryState,
                recoveryListener,
                l -> restoreFromRepository(repositoriesService.repository(repo), l)
            );
            break;
        case LOCAL_SHARDS:
            final IndexMetadata indexMetadata = indexSettings().getIndexMetadata();
            final Index resizeSourceIndex = indexMetadata.getResizeSourceIndex();
            final List<IndexShard> startedShards = new ArrayList<>();
            final IndexService sourceIndexService = indicesService.indexService(resizeSourceIndex);
            final Set<ShardId> requiredShards;
            final int numShards;
            if (sourceIndexService != null) {
                // determine which source shards feed this target shard and which are already started
                requiredShards = IndexMetadata.selectRecoverFromShards(shardId().id(),
                    sourceIndexService.getMetadata(), indexMetadata.getNumberOfShards());
                for (IndexShard shard : sourceIndexService) {
                    if (shard.state() == IndexShardState.STARTED && requiredShards.contains(shard.shardId())) {
                        startedShards.add(shard);
                    }
                }
                numShards = requiredShards.size();
            } else {
                // source index not present on this node; -1 signals "index not found" below
                numShards = -1;
                requiredShards = Collections.emptySet();
            }
            if (numShards == startedShards.size()) {
                assert requiredShards.isEmpty() == false;
                executeRecovery("from local shards", recoveryState, recoveryListener,
                    l -> recoverFromLocalShards(
                        startedShards.stream().filter((s) -> requiredShards.contains(s.shardId())).collect(Collectors.toList()),
                        l
                    ));
            } else {
                final RuntimeException e;
                if (numShards == -1) {
                    e = new IndexNotFoundException(resizeSourceIndex);
                } else {
                    e = new IllegalStateException("not all required shards of index " + resizeSourceIndex
                        + " are started yet, expected " + numShards + " found " + startedShards.size() + " can't recover shard "
                        + shardId());
                }
                throw e;
            }
            break;
        default:
            throw new IllegalArgumentException("Unknown recovery source " + recoveryState.getRecoverySource());
    }
}
/**
 * Marks the shard as recovering (synchronously, on the cluster state thread) and then runs
 * the given recovery action on the generic thread pool. The action's boolean result decides
 * whether the listener's success callback fires ({@code true}) or nothing happens
 * ({@code false}); any failure is reported as a {@link RecoveryFailedException}.
 *
 * @param reason           a short description used when marking the shard as recovering
 * @param recoveryState    the recovery state to report progress/failure against
 * @param recoveryListener notified of recovery completion or failure
 * @param action           the actual recovery work, completed via the provided listener
 */
private void executeRecovery(String reason,
                             RecoveryState recoveryState,
                             PeerRecoveryTargetService.RecoveryListener recoveryListener,
                             CheckedConsumer<ActionListener<Boolean>, Exception> action) {
    markAsRecovering(reason, recoveryState); // mark the shard as recovering on the cluster state thread
    threadPool.generic().execute(ActionRunnable.wrap(ActionListener.wrap(
        r -> {
            // a false result means recovery was aborted/deferred; only signal success on true
            if (r) {
                recoveryListener.onRecoveryDone(recoveryState);
            }
        },
        e -> recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true)), action));
}
/**
 * Returns whether the shard is a relocated primary, i.e. not in charge anymore of replicating
 * changes (see {@link ReplicationTracker}).
 */
public boolean isRelocatedPrimary() {
    assert shardRouting.primary() : "only call isRelocatedPrimary on primary shard";
    final boolean relocated = replicationTracker.isRelocated();
    return relocated;
}
/**
 * Adds a peer-recovery retention lease (PRRL) for the given node, retaining history from the
 * given global checkpoint. Must only be called in primary mode.
 *
 * @param nodeId           the node the lease is for
 * @param globalCheckpoint the checkpoint from which history must be retained
 * @param listener         completed once the lease has been replicated
 * @return the newly added retention lease
 */
public RetentionLease addPeerRecoveryRetentionLease(String nodeId, long globalCheckpoint,
                                                    ActionListener<ReplicationResponse> listener) {
    assert assertPrimaryMode();
    // only needed for BWC reasons involving rolling upgrades from versions that do not support PRRLs:
    assert indexSettings.getIndexVersionCreated().before(Version.V_4_3_0) || indexSettings.isSoftDeleteEnabled() == false;
    return replicationTracker.addPeerRecoveryRetentionLease(nodeId, globalCheckpoint, listener);
}
/**
 * Clones this shard's own peer-recovery retention lease for the given node.
 * Must only be called in primary mode.
 *
 * @param nodeId   the node the cloned lease is for
 * @param listener completed once the lease has been replicated
 * @return the cloned retention lease
 */
public RetentionLease cloneLocalPeerRecoveryRetentionLease(String nodeId, ActionListener<ReplicationResponse> listener) {
    assert assertPrimaryMode();
    final RetentionLease lease = replicationTracker.cloneLocalPeerRecoveryRetentionLease(nodeId, listener);
    return lease;
}
/**
 * Removes the peer-recovery retention lease for the given node.
 * Must only be called in primary mode.
 *
 * @param nodeId   the node whose lease should be removed
 * @param listener completed once the removal has been replicated
 */
public void removePeerRecoveryRetentionLease(String nodeId, ActionListener<ReplicationResponse> listener) {
    assert assertPrimaryMode();
    replicationTracker.removePeerRecoveryRetentionLease(nodeId, listener);
}
/**
 * Returns a list of retention leases for peer recovery installed in this shard copy.
 */
public List<RetentionLease> getPeerRecoveryRetentionLeases() {
    final List<RetentionLease> leases = replicationTracker.getPeerRecoveryRetentionLeases();
    return leases;
}
/** Whether peer recovery on this shard uses retention leases (value set elsewhere in this class). */
public boolean useRetentionLeasesInPeerRecovery() {
    return this.useRetentionLeasesInPeerRecovery;
}
/**
 * Returns the engine's safe-commit info, or {@link SafeCommitInfo#EMPTY} when no engine is
 * currently open (engine not started yet or already closed).
 */
private SafeCommitInfo getSafeCommitInfo() {
    final Engine engine = getEngineOrNull();
    if (engine == null) {
        return SafeCommitInfo.EMPTY;
    }
    return engine.getSafeCommitInfo();
}
/**
 * Engine event listener that fans an engine failure out to all registered shard-failure
 * callbacks. Listener exceptions are logged (with the original failure attached as a
 * suppressed exception) and never propagate, so every delegate gets notified.
 */
class ShardEventListener implements Engine.EventListener {
    private final CopyOnWriteArrayList<Consumer<ShardFailure>> delegates = new CopyOnWriteArrayList<>();

    // called by the current engine
    @Override
    public void onFailedEngine(String reason, @Nullable Exception failure) {
        final ShardFailure shardFailure = new ShardFailure(shardRouting, reason, failure);
        for (Consumer<ShardFailure> listener : delegates) {
            try {
                listener.accept(shardFailure);
            } catch (Exception inner) {
                // fix: failure is @Nullable and Throwable#addSuppressed throws NPE on null,
                // which would mask the listener's own exception; only attach it when present
                if (failure != null) {
                    inner.addSuppressed(failure);
                }
                logger.warn("exception while notifying engine failure", inner);
            }
        }
    }
}
/**
 * Persists the shard state metadata (primary flag, index UUID, allocation id) to disk, but
 * only when the routing information that is actually stored in the metadata has changed
 * compared to {@code currentRouting}. Static so it cannot touch mutable shard state.
 *
 * @param shardPath      where the shard state file lives
 * @param indexSettings  supplies the index UUID written into the metadata
 * @param newRouting     the routing to persist; must not be null
 * @param currentRouting the previously persisted routing, or null on first write
 * @param logger         shard logger for trace output
 * @throws IOException if writing the state file fails
 */
private static void persistMetadata(
        final ShardPath shardPath,
        final IndexSettings indexSettings,
        final ShardRouting newRouting,
        final @Nullable ShardRouting currentRouting,
        final Logger logger) throws IOException {
    assert newRouting != null : "newRouting must not be null";
    // only persist metadata if routing information that is persisted in shard state metadata actually changed
    final ShardId shardId = newRouting.shardId();
    if (currentRouting == null
        || currentRouting.primary() != newRouting.primary()
        || currentRouting.allocationId().equals(newRouting.allocationId()) == false) {
        assert currentRouting == null || currentRouting.isSameAllocation(newRouting);
        final String writeReason;
        if (currentRouting == null) {
            writeReason = "initial state with allocation id [" + newRouting.allocationId() + "]";
        } else {
            writeReason = "routing changed from " + currentRouting + " to " + newRouting;
        }
        logger.trace("{} writing shard state, reason [{}]", shardId, writeReason);
        final ShardStateMetadata newShardStateMetadata =
            new ShardStateMetadata(newRouting.primary(), indexSettings.getUUID(), newRouting.allocationId());
        ShardStateMetadata.FORMAT.write(newShardStateMetadata, shardPath.getShardStatePath());
    } else {
        logger.trace("{} skip writing shard state, has been written before", shardId);
    }
}
/**
 * Builds a fresh {@link EngineConfig} from this shard's current collaborators.
 *
 * @param globalCheckpointSupplier supplies the global checkpoint for the engine
 * @return a new engine configuration (note: arguments are positional — keep order in sync
 *         with the EngineConfig constructor)
 */
private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) {
    return new EngineConfig(
        shardId,
        threadPool,
        indexSettings,
        store,
        indexSettings.getMergePolicy(),
        // mapperService may be null (e.g. shadow/system usage); then no index analyzer is set
        mapperService == null ? null : mapperService.indexAnalyzer(),
        codecService,
        shardEventListener,
        queryCache,
        cachingPolicy,
        translogConfig,
        IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
        List.of(refreshListeners, refreshPendingLocationListener),
        Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
        circuitBreakerService,
        globalCheckpointSupplier,
        replicationTracker::getRetentionLeases,
        this::getOperationPrimaryTerm,
        tombstoneDocSupplier()
    );
}
/**
 * Acquire a primary operation permit whenever the shard is ready for indexing. If a permit is
 * directly available, the provided ActionListener will be called on the calling thread. During
 * relocation hand-off, permit acquisition can be delayed; the listener will then be called using
 * the provided executor.
 *
 * @param onPermitAcquired receives the permit (a {@link Releasable}) or the failure
 * @param executorOnDelay  executor name to notify on if acquisition is delayed
 * @param debugInfo        extra information that can be useful when tracing an unreleased
 *                         permit; captured via {@link Object#toString()} only when assertions
 *                         are enabled, otherwise unused
 */
public void acquirePrimaryOperationPermit(ActionListener<Releasable> onPermitAcquired, String executorOnDelay, Object debugInfo) {
    // convenience overload: never force execution on the delayed executor
    final boolean forceExecution = false;
    acquirePrimaryOperationPermit(onPermitAcquired, executorOnDelay, debugInfo, forceExecution);
}
/**
 * Acquire a single primary operation permit, wrapping the listener so that the shard's
 * primary mode is verified after the permit is granted.
 *
 * @param onPermitAcquired receives the permit or the failure
 * @param executorOnDelay  executor name to notify on if acquisition is delayed
 * @param debugInfo        tracing aid for unreleased permits (assertions only)
 * @param forceExecution   whether the delayed notification may bypass executor queue limits
 */
public void acquirePrimaryOperationPermit(ActionListener<Releasable> onPermitAcquired, String executorOnDelay, Object debugInfo,
                                          boolean forceExecution) {
    verifyNotClosed();
    assert shardRouting.primary() : "acquirePrimaryOperationPermit should only be called on primary shard: " + shardRouting;
    final ActionListener<Releasable> wrapped = wrapPrimaryOperationPermitListener(onPermitAcquired);
    indexShardOperationPermits.acquire(wrapped, executorOnDelay, forceExecution, debugInfo);
}
/**
 * Acquire all primary operation permits. Once all permits are acquired, the provided
 * ActionListener is called. It is the responsibility of the caller to close the
 * {@link Releasable}.
 *
 * @param onPermitAcquired receives the combined permit or the failure
 * @param timeout          maximum time to wait for in-flight operations to drain
 */
public void acquireAllPrimaryOperationsPermits(final ActionListener<Releasable> onPermitAcquired, final TimeValue timeout) {
    verifyNotClosed();
    assert shardRouting.primary() : "acquireAllPrimaryOperationsPermits should only be called on primary shard: " + shardRouting;
    final ActionListener<Releasable> wrapped = wrapPrimaryOperationPermitListener(onPermitAcquired);
    asyncBlockOperations(wrapped, timeout.duration(), timeout.timeUnit());
}
/**
 * Wraps a permit listener so that, once a permit is granted, the shard's primary mode is
 * re-checked: if the shard is no longer in primary mode the permit is released immediately
 * and the listener fails with {@link ShardNotInPrimaryModeException}.
 *
 * @param listener the listener to wrap
 * @return the wrapped listener
 */
private ActionListener<Releasable> wrapPrimaryOperationPermitListener(final ActionListener<Releasable> listener) {
    return ActionListener.delegateFailure(listener, (delegate, permit) -> {
        if (replicationTracker.isPrimaryMode() == false) {
            // release before failing so the permit is never leaked
            permit.close();
            delegate.onFailure(new ShardNotInPrimaryModeException(shardId, state));
        } else {
            delegate.onResponse(permit);
        }
    });
}
/**
 * Asynchronously acquires all operation permits (blocking new operations) while forcing
 * refresh listeners to fire, so no listener can wait on a refresh that is itself blocked by
 * the permit block. The forced-refresh handle is closed on every path: success, failure, and
 * synchronous throw from the permit service.
 *
 * @param onPermitAcquired receives the all-permits releasable or the failure
 * @param timeout          maximum time to wait for in-flight operations
 * @param timeUnit         unit of {@code timeout}
 */
private void asyncBlockOperations(ActionListener<Releasable> onPermitAcquired, long timeout, TimeUnit timeUnit) {
    final Releasable forceRefreshes = refreshListeners.forceRefreshes();
    // wrap so the forced-refresh handle is released before the caller's listener runs
    final ActionListener<Releasable> wrappedListener = ActionListener.wrap(
        r -> {
            forceRefreshes.close();
            onPermitAcquired.onResponse(r);
        },
        e -> {
            forceRefreshes.close();
            onPermitAcquired.onFailure(e);
        }
    );
    try {
        indexShardOperationPermits.asyncBlockOperations(wrappedListener, timeout, timeUnit);
    } catch (Exception e) {
        // the permit service threw before taking ownership of the listener; clean up here
        forceRefreshes.close();
        throw e;
    }
}
/**
 * Runs the specified runnable under a primary permit, otherwise calling back the specified
 * failure callback. This is a convenience for
 * {@link #acquirePrimaryOperationPermit(ActionListener, String, Object)} where the listener
 * closes the permit (try-with-resources) after running the runnable, or routes acquisition
 * failures to the failure callback.
 *
 * @param runnable        the runnable to execute under permit
 * @param onFailure       the callback on failure
 * @param executorOnDelay the executor to execute the runnable on if permit acquisition is blocked
 * @param debugInfo       debug info
 */
public void runUnderPrimaryPermit(
        final Runnable runnable,
        final Consumer<Exception> onFailure,
        final String executorOnDelay,
        final Object debugInfo) {
    verifyNotClosed();
    assert shardRouting.primary() : "runUnderPrimaryPermit should only be called on primary shard but was " + shardRouting;
    acquirePrimaryOperationPermit(
        ActionListener.wrap(
            permit -> {
                try (Releasable ignored = permit) {
                    runnable.run();
                }
            },
            onFailure),
        executorOnDelay,
        debugInfo);
}
/**
 * Transitions this shard to a new primary term by blocking all operations, then — once all
 * permits are held and the pending term has been published — applying the operation term and
 * running {@code onBlocked}. Must be called under {@code mutex}.
 *
 * <p>The {@code termUpdated} latch ensures the async block callback observes the new
 * {@code pendingPrimaryTerm} (set after this method schedules the block) before it decides
 * whether to apply the term.
 *
 * @param newPrimaryTerm    the term to transition to
 * @param onBlocked         work to run while all permits are held and after the term is applied
 * @param combineWithAction if non-null, takes over ownership of the all-permits releasable
 *                          (and is notified of failures) instead of this method releasing it
 */
private <E extends Exception> void bumpPrimaryTerm(long newPrimaryTerm,
                                                   final CheckedRunnable<E> onBlocked,
                                                   @Nullable ActionListener<Releasable> combineWithAction) {
    assert Thread.holdsLock(mutex);
    assert newPrimaryTerm > pendingPrimaryTerm || (newPrimaryTerm >= pendingPrimaryTerm && combineWithAction != null);
    assert getOperationPrimaryTerm() <= pendingPrimaryTerm;
    final CountDownLatch termUpdated = new CountDownLatch(1);
    asyncBlockOperations(new ActionListener<Releasable>() {
        @Override
        public void onFailure(final Exception e) {
            try {
                innerFail(e);
            } finally {
                // propagate the failure to the combined action so its caller is unblocked too
                if (combineWithAction != null) {
                    combineWithAction.onFailure(e);
                }
            }
        }

        // fails the shard on term-transition errors; tolerates an already-closed shard
        private void innerFail(final Exception e) {
            try {
                failShard("exception during primary term transition", e);
            } catch (AlreadyClosedException ace) {
                // ignore, shard is already closed
            }
        }

        @Override
        public void onResponse(final Releasable releasable) {
            final RunOnce releaseOnce = new RunOnce(releasable::close);
            try {
                assert getOperationPrimaryTerm() <= pendingPrimaryTerm;
                // wait until pendingPrimaryTerm has been published below before comparing terms
                termUpdated.await();
                // indexShardOperationPermits doesn't guarantee that async submissions are executed
                // in the order submitted. We need to guard against another term bump
                if (getOperationPrimaryTerm() < newPrimaryTerm) {
                    replicationTracker.setOperationPrimaryTerm(newPrimaryTerm);
                    onBlocked.run();
                }
            } catch (final Exception e) {
                if (combineWithAction == null) {
                    // otherwise leave it to combineWithAction to release the permit
                    releaseOnce.run();
                }
                innerFail(e);
            } finally {
                if (combineWithAction != null) {
                    // hand the permit over to the combined action; it now owns the release
                    combineWithAction.onResponse(releasable);
                } else {
                    releaseOnce.run();
                }
            }
        }
    }, 30, TimeUnit.MINUTES);
    // publish the new pending term, then let the block callback proceed
    pendingPrimaryTerm = newPrimaryTerm;
    termUpdated.countDown();
}
/**
 * Acquire a replica operation permit whenever the shard is ready for indexing (see
 * {@link #acquirePrimaryOperationPermit(ActionListener, String, Object)}). If the given primary term is lower than then one in
 * {@link #shardRouting}, the {@link ActionListener#onFailure(Exception)} method of the provided listener is invoked with an
 * {@link IllegalStateException}. If permit acquisition is delayed, the listener will be invoked on the executor with the specified
 * name.
 *
 * @param opPrimaryTerm              the operation primary term
 * @param globalCheckpoint           the global checkpoint associated with the request
 * @param maxSeqNoOfUpdatesOrDeletes the max seq_no of updates (index operations overwrite Lucene) or deletes captured on the primary
 *                                   after this replication request was executed on it (see {@link #getMaxSeqNoOfUpdatesOrDeletes()}
 * @param onPermitAcquired           the listener for permit acquisition
 * @param executorOnDelay            the name of the executor to invoke the listener on if permit acquisition is delayed
 * @param debugInfo                  an extra information that can be useful when tracing an unreleased permit. When assertions are
 *                                   enabled the tracing will capture the supplied object's {@link Object#toString()} value.
 *                                   Otherwise the object isn't used
 */
public void acquireReplicaOperationPermit(final long opPrimaryTerm,
                                          final long globalCheckpoint,
                                          final long maxSeqNoOfUpdatesOrDeletes,
                                          final ActionListener<Releasable> onPermitAcquired,
                                          final String executorOnDelay,
                                          final Object debugInfo) {
    // single-permit path: combine-with-term-update is disabled; note forceExecution=true on the acquire
    innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired, false,
        (listener) -> indexShardOperationPermits.acquire(listener, executorOnDelay, true, debugInfo));
}
/**
 * Acquire all replica operation permits whenever the shard is ready for indexing (see
 * {@link #acquireAllPrimaryOperationsPermits(ActionListener, TimeValue)}. If the given primary term is lower than then one in
 * {@link #shardRouting}, the {@link ActionListener#onFailure(Exception)} method of the provided listener is invoked with an
 * {@link IllegalStateException}.
 *
 * @param opPrimaryTerm              the operation primary term
 * @param globalCheckpoint           the global checkpoint associated with the request
 * @param maxSeqNoOfUpdatesOrDeletes the max seq_no of updates (index operations overwrite Lucene) or deletes captured on the primary
 *                                   after this replication request was executed on it (see {@link #getMaxSeqNoOfUpdatesOrDeletes()}
 * @param onPermitAcquired           the listener for permit acquisition
 * @param timeout                    the maximum time to wait for the in-flight operations block
 */
public void acquireAllReplicaOperationsPermits(final long opPrimaryTerm,
                                               final long globalCheckpoint,
                                               final long maxSeqNoOfUpdatesOrDeletes,
                                               final ActionListener<Releasable> onPermitAcquired,
                                               final TimeValue timeout) {
    // all-permits path: combining the operation with a pending primary term update is allowed
    innerAcquireReplicaOperationPermit(
        opPrimaryTerm,
        globalCheckpoint,
        maxSeqNoOfUpdatesOrDeletes,
        onPermitAcquired,
        true,
        (listener) -> asyncBlockOperations(listener, timeout.duration(), timeout.timeUnit())
    );
}
/**
 * Shared implementation for replica permit acquisition (single permit or all permits).
 * Validates the operation's primary term against the shard's current term, bumping the term
 * first (under mutex, double-checked) when a newer primary has been detected, and then runs
 * the operation via {@code operationExecutor}.
 *
 * @param opPrimaryTerm                              the operation primary term
 * @param globalCheckpoint                           the global checkpoint from the request
 * @param maxSeqNoOfUpdatesOrDeletes                 the primary's max seq_no of updates/deletes
 * @param onPermitAcquired                           the caller's permit listener
 * @param allowCombineOperationWithPrimaryTermUpdate whether the operation may be combined with
 *                                                   a required term bump (all-permits path)
 * @param operationExecutor                          how to actually acquire the permit(s)
 */
private void innerAcquireReplicaOperationPermit(final long opPrimaryTerm,
                                                final long globalCheckpoint,
                                                final long maxSeqNoOfUpdatesOrDeletes,
                                                final ActionListener<Releasable> onPermitAcquired,
                                                final boolean allowCombineOperationWithPrimaryTermUpdate,
                                                final Consumer<ActionListener<Releasable>> operationExecutor) {
    verifyNotClosed();
    // This listener is used for the execution of the operation. If the operation requires all the permits for its
    // execution and the primary term must be updated first, we can combine the operation execution with the
    // primary term update. Since indexShardOperationPermits doesn't guarantee that async submissions are executed
    // in the order submitted, combining both operations ensure that the term is updated before the operation is
    // executed. It also has the side effect of acquiring all the permits one time instead of two.
    final ActionListener<Releasable> operationListener = ActionListener.delegateFailure(
        onPermitAcquired,
        (delegatedListener, releasable) -> {
            if (opPrimaryTerm < getOperationPrimaryTerm()) {
                // stale operation from an old primary: release the permit and reject
                releasable.close();
                final String message = String.format(
                    Locale.ROOT,
                    "%s operation primary term [%d] is too old (current [%d])",
                    shardId,
                    opPrimaryTerm,
                    getOperationPrimaryTerm());
                delegatedListener.onFailure(new IllegalStateException(message));
            } else {
                assert assertReplicationTarget();
                try {
                    // advance replica-side bookkeeping before handing the permit to the caller
                    updateGlobalCheckpointOnReplica(globalCheckpoint, "operation");
                    advanceMaxSeqNoOfUpdatesOrDeletes(maxSeqNoOfUpdatesOrDeletes);
                } catch (Exception e) {
                    releasable.close();
                    delegatedListener.onFailure(e);
                    return;
                }
                delegatedListener.onResponse(releasable);
            }
        }
    );
    // double-checked under mutex: only one thread performs the term bump
    if (requirePrimaryTermUpdate(opPrimaryTerm, allowCombineOperationWithPrimaryTermUpdate)) {
        synchronized (mutex) {
            if (requirePrimaryTermUpdate(opPrimaryTerm, allowCombineOperationWithPrimaryTermUpdate)) {
                final IndexShardState shardState = state();
                // only roll translog and update primary term if shard has made it past recovery
                // Having a new primary term here means that the old primary failed and that there is a new primary, which again
                // means that the master will fail this shard as all initializing shards are failed when a primary is selected
                // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint
                if (shardState != IndexShardState.POST_RECOVERY &&
                    shardState != IndexShardState.STARTED) {
                    throw new IndexShardNotStartedException(shardId, shardState);
                }
                bumpPrimaryTerm(opPrimaryTerm, () -> {
                    updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition");
                    final long currentGlobalCheckpoint = getLastKnownGlobalCheckpoint();
                    final long maxSeqNo = seqNoStats().getMaxSeqNo();
                    logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]",
                        opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo);
                    // operations above the global checkpoint may not have been replicated by the
                    // failed primary; roll back to the checkpoint when there are any, else just roll the translog
                    if (currentGlobalCheckpoint < maxSeqNo) {
                        resetEngineToGlobalCheckpoint();
                    } else {
                        getEngine().rollTranslogGeneration();
                    }
                }, allowCombineOperationWithPrimaryTermUpdate ? operationListener : null);
                if (allowCombineOperationWithPrimaryTermUpdate) {
                    logger.debug("operation execution has been combined with primary term update");
                    return;
                }
            }
        }
    }
    assert opPrimaryTerm <= pendingPrimaryTerm
        : "operation primary term [" + opPrimaryTerm + "] should be at most [" + pendingPrimaryTerm + "]";
    operationExecutor.accept(operationListener);
}
/**
 * Whether the shard must bump its primary term before executing an operation with the given
 * term. Always required when the operation term exceeds the pending term; additionally, on
 * the all-permits path ({@code allPermits}), required when it exceeds the applied operation term.
 */
private boolean requirePrimaryTermUpdate(final long opPrimaryTerm, final boolean allPermits) {
    if (opPrimaryTerm > pendingPrimaryTerm) {
        return true;
    }
    return allPermits && opPrimaryTerm > getOperationPrimaryTerm();
}
// Sentinel returned by getActiveOperationsCount() when all permits are held.
public static final int OPERATIONS_BLOCKED = -1;

/**
 * Obtain the active operation count, or {@link IndexShard#OPERATIONS_BLOCKED} if all permits are held (even if there are
 * outstanding operations in flight).
 *
 * @return the active operation count, or {@link IndexShard#OPERATIONS_BLOCKED} when all permits are held.
 */
public int getActiveOperationsCount() {
    return indexShardOperationPermits.getActiveOperationsCount();
}
/**
 * @return a list describing each permit that wasn't released yet. The description consists of
 *         the debugInfo supplied when the permit was acquired plus a stack trace captured when
 *         the permit was requested.
 */
public List<String> getActiveOperations() {
    final List<String> operations = indexShardOperationPermits.getActiveOperations();
    return operations;
}
// Batches concurrent translog-sync requests: one thread fsyncs up to the furthest requested
// location on behalf of all waiters (see sync(Location, Consumer)).
private final AsyncIOProcessor<Translog.Location> translogSyncProcessor = new AsyncIOProcessor<Translog.Location>(logger, 1024) {
    @Override
    protected void write(List<Tuple<Translog.Location, Consumer<Exception>>> candidates) throws IOException {
        try {
            getEngine().ensureTranslogSynced(candidates.stream().map(Tuple::v1));
        } catch (AlreadyClosedException ex) {
            // that's fine since we already synced everything on engine close - this also is conform with the methods
            // documentation
        } catch (IOException ex) { // a failed fsync is fatal for the pending requests - surface it
            logger.debug("failed to sync translog", ex);
            throw ex;
        }
    }
};
/**
 * Syncs the given location with the underlying storage unless already synced. This method may
 * return immediately without actually fsyncing the location until the sync listener is called;
 * however, unless another thread is already fsyncing the translog, the calling thread is
 * hijacked to run the fsync for all pending sync operations. This lets indexing threads
 * continue without blocking on fsync calls — only one thread blocks on the sync while all
 * others keep indexing.
 *
 * NOTE: if the syncListener throws while being processed, the exception is only logged; make
 * sure the listener handles all exception cases internally.
 */
public final void sync(Translog.Location location, Consumer<Exception> syncListener) {
    verifyNotClosed();
    translogSyncProcessor.put(location, syncListener);
}
/** Synchronously fsyncs the translog via the engine. */
public void sync() throws IOException {
    verifyNotClosed();
    final Engine engine = getEngine();
    engine.syncTranslog();
}
/**
 * Checks if the underlying storage sync is required (i.e. the translog has unsynced operations).
 */
public boolean isSyncNeeded() {
    final Engine engine = getEngine();
    return engine.isTranslogSyncNeeded();
}
/**
 * Returns the current translog durability mode (request-level vs. async fsync).
 */
public Translog.Durability getTranslogDurability() {
    return this.indexSettings.getTranslogDurability();
}
// we can not protect with a lock since we "release" on a different thread
// (guards against scheduling more than one concurrent flush/roll; see afterWriteOperation)
private final AtomicBoolean flushOrRollRunning = new AtomicBoolean();

/**
 * Schedules a flush or translog generation roll if needed but will not schedule more than one concurrently. The operation will be
 * executed asynchronously on the flush thread pool.
 */
public void afterWriteOperation() {
    if (shouldPeriodicallyFlush() || shouldRollTranslogGeneration()) {
        if (flushOrRollRunning.compareAndSet(false, true)) {
            /*
             * We have to check again since otherwise there is a race when a thread passes the first check next to another thread which
             * performs the operation quickly enough to finish before the current thread could flip the flag. In that situation, we
             * have an extra operation.
             *
             * Additionally, a flush implicitly executes a translog generation roll so if we execute a flush then we do not need to
             * check if we should roll the translog generation.
             */
            if (shouldPeriodicallyFlush()) {
                logger.debug("submitting async flush request");
                final AbstractRunnable flush = new AbstractRunnable() {
                    @Override
                    public void onFailure(final Exception e) {
                        // a failure on a closed shard is expected noise; only warn while open
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to flush index", e);
                        }
                    }

                    @Override
                    protected void doRun() throws IOException {
                        flush(new FlushRequest());
                        periodicFlushMetric.inc();
                    }

                    @Override
                    public void onAfter() {
                        // clear the flag, then re-check: more work may have become necessary meanwhile
                        flushOrRollRunning.compareAndSet(true, false);
                        afterWriteOperation();
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(flush);
            } else if (shouldRollTranslogGeneration()) {
                logger.debug("submitting async roll translog generation request");
                final AbstractRunnable roll = new AbstractRunnable() {
                    @Override
                    public void onFailure(final Exception e) {
                        if (state != IndexShardState.CLOSED) {
                            logger.warn("failed to roll translog generation", e);
                        }
                    }

                    @Override
                    protected void doRun() throws Exception {
                        rollTranslogGeneration();
                    }

                    @Override
                    public void onAfter() {
                        // clear the flag, then re-check: more work may have become necessary meanwhile
                        flushOrRollRunning.compareAndSet(true, false);
                        afterWriteOperation();
                    }
                };
                threadPool.executor(ThreadPool.Names.FLUSH).execute(roll);
            } else {
                // conditions no longer hold (another thread did the work); release the flag
                flushOrRollRunning.compareAndSet(true, false);
            }
        }
    }
}
/**
 * Build {@linkplain RefreshListeners} for this shard.
 */
private RefreshListeners buildRefreshListeners() {
    return new RefreshListeners(
        indexSettings::getMaxRefreshListeners,   // supplier of the configured listener limit
        () -> refresh("too_many_listeners"),     // refresh forced when that limit is reached
        threadPool.executor(ThreadPool.Names.LISTENER)::execute, // listeners fire on the listener pool
        logger
    );
}
/**
 * Simple struct encapsulating a shard failure
 *
 * @see IndexShard#addShardFailureCallback(Consumer)
 */
public static final class ShardFailure {
    /** Routing entry of the failed shard. */
    public final ShardRouting routing;
    /** Human-readable reason for the failure. */
    public final String reason;
    /** Underlying exception, if any. */
    @Nullable
    public final Exception cause;

    public ShardFailure(ShardRouting routing, String reason, @Nullable Exception cause) {
        this.routing = routing;
        this.reason = reason;
        this.cause = cause;
    }
}
// Package-private accessor (used by tests / same-package callers) for the registered engine factory providers.
Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders() {
    return engineFactoryProviders;
}
// for tests
ReplicationTracker getReplicationTracker() {
    return replicationTracker;
}
/**
 * Executes a scheduled refresh if necessary.
 *
 * @return <code>true</code> iff the engine got refreshed otherwise <code>false</code>
 */
public boolean scheduledRefresh() {
    verifyNotClosed();
    boolean listenerNeedsRefresh = refreshListeners.refreshNeeded();
    if (isReadAllowed() && (listenerNeedsRefresh || getEngine().refreshNeeded())) {
        if (listenerNeedsRefresh == false // if we have a listener that is waiting for a refresh we need to force it
            && isSearchIdle()
            && indexSettings.isExplicitRefresh() == false
            && active.get()) { // it must be active otherwise we might not free up segment memory once the shard became inactive
            // lets skip this refresh since we are search idle and
            // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will
            // cause the next schedule to refresh.
            final Engine engine = getEngine();
            engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some
            setRefreshPending(engine);
            return false;
        } else {
            if (logger.isTraceEnabled()) {
                logger.trace("refresh with source [schedule]");
            }
            return getEngine().maybeRefresh("schedule");
        }
    }
    // No refresh needed (or reads not allowed yet); still give the engine a chance to prune deletes.
    final Engine engine = getEngine();
    engine.maybePruneDeletes(); // try to prune the deletes in the engine if we accumulated some
    return false;
}
/**
 * Returns true if this shard is search idle, i.e. no searcher access has happened within the
 * configured {@code search.idle.after} window.
 */
public final boolean isSearchIdle() {
    return (threadPool.relativeTimeInMillis() - lastSearcherAccess.get()) >= indexSettings.getSearchIdleAfter().millis();
}
/**
 * Returns the last timestamp the searcher was accessed. This is a relative timestamp in milliseconds.
 */
final long getLastSearcherAccess() {
    return lastSearcherAccess.get();
}
/**
 * Returns true if this shard has some scheduled refresh that is pending because of search-idle.
 */
public final boolean hasRefreshPending() {
    return pendingRefreshLocation.get() != null;
}
/**
 * Records the engine's latest translog write location as pending a refresh, advancing any
 * previously recorded location but never moving it backwards.
 */
private void setRefreshPending(Engine engine) {
    final Translog.Location lastWriteLocation = engine.getTranslogLastWriteLocation();
    // Keep whichever location is newer: the freshly captured one or the already pending one.
    pendingRefreshLocation.updateAndGet(
        curr -> (curr == null || curr.compareTo(lastWriteLocation) <= 0) ? lastWriteLocation : curr);
}
/**
 * Refresh listener that clears {@code pendingRefreshLocation} once a refresh has made all
 * operations up to the location captured before that refresh visible.
 */
private class RefreshPendingLocationListener implements ReferenceManager.RefreshListener {
    // Translog location captured just before the refresh started; null if the shard was already closed.
    Translog.Location lastWriteLocation;

    @Override
    public void beforeRefresh() {
        try {
            lastWriteLocation = getEngine().getTranslogLastWriteLocation();
        } catch (AlreadyClosedException exc) {
            // shard is closed - no location is fine
            lastWriteLocation = null;
        }
    }

    @Override
    public void afterRefresh(boolean didRefresh) {
        if (didRefresh && lastWriteLocation != null) {
            pendingRefreshLocation.updateAndGet(pendingLocation -> {
                // If the refresh covered the pending location, clear it; otherwise keep waiting.
                if (pendingLocation == null || pendingLocation.compareTo(lastWriteLocation) <= 0) {
                    return null;
                } else {
                    return pendingLocation;
                }
            });
        }
    }
}
/**
 * Wait for an idle shard to refresh. Completes immediately if the shard wasn't idle or if there are no pending refresh locations.
 *
 * @return a future completed with {@code true} if a refresh listener had to be registered,
 *         {@code false} if the shard was already search-active
 */
public CompletableFuture<Boolean> awaitShardSearchActive() {
    CompletableFuture<Boolean> result = new CompletableFuture<>();
    // Method reference instead of the redundant lambda b -> result.complete(b).
    awaitShardSearchActive(result::complete);
    return result;
}
/**
 * Registers the given listener and invokes it once the shard is active again and all
 * pending refresh translog location has been refreshed. If there is no pending refresh location registered the listener will be
 * invoked immediately.
 * @param listener the listener to invoke once the pending refresh location is visible. The listener will be called with
 *                 <code>true</code> if the listener was registered to wait for a refresh.
 */
public final void awaitShardSearchActive(Consumer<Boolean> listener) {
    markSearcherAccessed(); // move the shard into non-search idle
    final Translog.Location location = pendingRefreshLocation.get();
    if (location != null) {
        addRefreshListener(location, (b) -> {
            // Clear the pending location only if it is still the one we waited on.
            pendingRefreshLocation.compareAndSet(location, null);
            listener.accept(true);
        });
    } else {
        listener.accept(false);
    }
}
/**
 * Add a listener for refreshes.
 *
 * @param location the location to listen for
 * @param listener for the refresh. Called with true if registering the listener ran it out of slots and forced a refresh. Called with
 *        false otherwise.
 */
public void addRefreshListener(Translog.Location location, Consumer<Boolean> listener) {
    final boolean readAllowed;
    if (isReadAllowed()) {
        readAllowed = true;
    } else {
        // check again under postRecoveryMutex. this is important to create a happens before relationship
        // between the switch to POST_RECOVERY + associated refresh. Otherwise we may respond
        // to a listener before a refresh actually happened that contained that operation.
        synchronized (postRecoveryMutex) {
            readAllowed = isReadAllowed();
        }
    }
    if (readAllowed) {
        refreshListeners.addOrNotify(location, listener);
    } else {
        // we're not yet ready for reads, just ignore refresh cycles
        listener.accept(false);
    }
}
/**
 * Refresh listener that records the wall-clock duration of each refresh into the given metric.
 * Assertion checks verify that beforeRefresh/afterRefresh are paired and run on the same thread.
 */
private static class RefreshMetricUpdater implements ReferenceManager.RefreshListener {
    private final MeanMetric refreshMetric;
    // Start of the current refresh, as System.nanoTime(); valid between beforeRefresh and afterRefresh.
    private long currentRefreshStartTime;
    // Thread that called beforeRefresh; used only for assertions.
    private Thread callingThread = null;

    private RefreshMetricUpdater(MeanMetric refreshMetric) {
        this.refreshMetric = refreshMetric;
    }

    @Override
    public void beforeRefresh() throws IOException {
        if (Assertions.ENABLED) {
            assert callingThread == null : "beforeRefresh was called by " + callingThread.getName() +
                " without a corresponding call to afterRefresh";
            callingThread = Thread.currentThread();
        }
        currentRefreshStartTime = System.nanoTime();
    }

    @Override
    public void afterRefresh(boolean didRefresh) throws IOException {
        if (Assertions.ENABLED) {
            assert callingThread != null : "afterRefresh called but not beforeRefresh";
            assert callingThread == Thread.currentThread() : "beforeRefreshed called by a different thread. current ["
                + Thread.currentThread().getName() + "], thread that called beforeRefresh [" + callingThread.getName() + "]";
            callingThread = null;
        }
        refreshMetric.inc(System.nanoTime() - currentRefreshStartTime);
    }
}
/**
 * Builds the supplier of tombstone documents the engine uses to record deletes and no-ops in Lucene.
 */
private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() {
    final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("default");
    // A minimal mapper used only for no-op tombstones; null when there is no mapper service.
    final DocumentMapper noopDocumentMapper = mapperService == null
        ? null
        : new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService);
    return new EngineConfig.TombstoneDocSupplier() {
        @Override
        public ParsedDocument newDeleteTombstoneDoc(String id) {
            // NOTE(review): unlike the no-op path above, this dereferences mapperService without a null
            // guard — presumably delete tombstones are never requested when mapperService is null; confirm.
            return mapperService.documentMapper().createDeleteTombstoneDoc(shardId.getIndexName(), id);
        }

        @Override
        public ParsedDocument newNoopTombstoneDoc(String reason) {
            return noopDocumentMapper.createNoopTombstoneDoc(shardId.getIndexName(), reason);
        }
    };
}
/**
 * Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint.
 *
 * <p>Sequence: sync + flush, install a temporary read-only engine (so concurrent commit acquisition
 * keeps working), create the new read-write engine, recover it from the translog up to the global
 * checkpoint, then swap it in as the current engine.
 *
 * @throws IOException if flushing, engine creation, or translog recovery fails
 */
void resetEngineToGlobalCheckpoint() throws IOException {
    assert Thread.holdsLock(mutex) == false : "resetting engine under mutex";
    assert getActiveOperationsCount() == OPERATIONS_BLOCKED
        : "resetting engine without blocking operations; active operations are [" + getActiveOperations() + ']';
    sync(); // persist the global checkpoint to disk
    final SeqNoStats seqNoStats = seqNoStats();
    final TranslogStats translogStats = translogStats();
    // flush to make sure the latest commit, which will be opened by the read-only engine, includes all operations.
    flush(new FlushRequest().waitIfOngoing(true));
    SetOnce<Engine> newEngineReference = new SetOnce<>();
    final long globalCheckpoint = getLastKnownGlobalCheckpoint();
    assert globalCheckpoint == getLastSyncedGlobalCheckpoint();
    synchronized (engineMutex) {
        verifyNotClosed();
        // we must create both new read-only engine and new read-write engine under engineMutex to ensure snapshotStoreMetadata,
        // acquireXXXCommit and close works.
        final Engine readOnlyEngine =
            new ReadOnlyEngine(newEngineConfig(replicationTracker), seqNoStats, translogStats, false, UnaryOperator.identity(), true) {
                @Override
                public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
                    synchronized (engineMutex) {
                        if (newEngineReference.get() == null) {
                            throw new AlreadyClosedException("engine was closed");
                        }
                        // ignore flushFirst since we flushed above and we do not want to interfere with ongoing translog replay
                        return newEngineReference.get().acquireLastIndexCommit(false);
                    }
                }

                @Override
                public IndexCommitRef acquireSafeIndexCommit() {
                    synchronized (engineMutex) {
                        if (newEngineReference.get() == null) {
                            throw new AlreadyClosedException("engine was closed");
                        }
                        return newEngineReference.get().acquireSafeIndexCommit();
                    }
                }

                @Override
                public void close() throws IOException {
                    assert Thread.holdsLock(engineMutex);
                    Engine newEngine = newEngineReference.get();
                    if (newEngine == currentEngineReference.get()) {
                        // we successfully installed the new engine so do not close it.
                        newEngine = null;
                    }
                    IOUtils.close(super::close, newEngine);
                }
            };
        // Swap the temporary read-only engine in, closing the previous engine.
        IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine));
        engineFactory = getEngineFactory();
        newEngineReference.set(engineFactory.newReadWriteEngine(newEngineConfig(replicationTracker)));
        onNewEngine(newEngineReference.get());
    }
    // Replay the local translog on the new engine up to the global checkpoint.
    final Engine.TranslogRecoveryRunner translogRunner = (engine, snapshot) -> runTranslogRecovery(
        engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {
            // TODO: add a dedicate recovery stats for the reset translog
        });
    newEngineReference.get().recoverFromTranslog(translogRunner, globalCheckpoint);
    newEngineReference.get().refresh("reset_engine");
    synchronized (engineMutex) {
        verifyNotClosed();
        IOUtils.close(currentEngineReference.getAndSet(newEngineReference.get()));
        // We set active because we are now writing operations to the engine; this way,
        // if we go idle after some time and become inactive, we still give sync'd flush a chance to run.
        active.set(true);
    }
    // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during
    // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine.
    applyEngineSettings();
}
/**
 * Returns the maximum sequence number of either update or delete operations have been processed in this shard
 * or the sequence number from {@link #advanceMaxSeqNoOfUpdatesOrDeletes(long)}. An index request is considered
 * as an update operation if it overwrites the existing documents in Lucene index with the same document id.
 * <p>
 * The primary captures this value after executes a replication request, then transfers it to a replica before
 * executing that replication request on a replica.
 */
public long getMaxSeqNoOfUpdatesOrDeletes() {
    return getEngine().getMaxSeqNoOfUpdatesOrDeletes();
}
/**
 * A replica calls this method to advance the max_seq_no_of_updates marker of its engine to at least the max_seq_no_of_updates
 * value (piggybacked in a replication request) that it receives from its primary before executing that replication request.
 * The receiving value is at least as high as the max_seq_no_of_updates on the primary was when any of the operations of that
 * replication request were processed on it.
 * <p>
 * A replica shard also calls this method to bootstrap the max_seq_no_of_updates marker with the value that it received from
 * the primary in peer-recovery, before it replays remote translog operations from the primary. The receiving value is at least
 * as high as the max_seq_no_of_updates on the primary was when any of these operations were processed on it.
 * <p>
 * These transfers guarantee that every index/delete operation when executing on a replica engine will observe this marker a value
 * which is at least the value of the max_seq_no_of_updates marker on the primary after that operation was executed on the primary.
 *
 * @param seqNo the new lower bound for the engine's max_seq_no_of_updates marker
 * @see #acquireReplicaOperationPermit(long, long, long, ActionListener, String, Object)
 * @see RecoveryTarget#indexTranslogOperations(List, int, long, long, RetentionLeases, long, ActionListener)
 */
public void advanceMaxSeqNoOfUpdatesOrDeletes(long seqNo) {
    getEngine().advanceMaxSeqNoOfUpdatesOrDeletes(seqNo);
}
/**
 * Performs the pre-closing checks on the {@link IndexShard}.
 *
 * @throws IllegalStateException if the sanity checks failed
 */
public void verifyShardBeforeIndexClosing() throws IllegalStateException {
    getEngine().verifyEngineBeforeIndexClosing();
}
/** Returns the metric tracking flush durations for this shard. */
public MeanMetric getFlushMetric() {
    return flushMetric;
}
/** Returns the number of periodic (size/age-triggered) flushes executed so far. */
public long periodicFlushCount() {
    return periodicFlushMetric.count();
}
/**
 * Resolves the engine factory for this shard: a no-op engine for closed indices, the single
 * provider-supplied factory if exactly one applies, the default internal factory if none does,
 * and an {@link IllegalStateException} if several providers compete.
 */
private EngineFactory getEngineFactory() {
    final IndexMetadata indexMetadata = indexSettings.getIndexMetadata();
    if (indexMetadata != null && indexMetadata.getState() == IndexMetadata.State.CLOSE) {
        // NoOpEngine takes precedence as long as the index is closed
        return NoOpEngine::new;
    }
    // Ask every registered provider; a null return is a provider bug and fails fast.
    final List<Optional<EngineFactory>> engineFactories = engineFactoryProviders
        .stream()
        .map(engineFactoryProvider -> engineFactoryProvider.apply(indexSettings))
        .map(Objects::requireNonNull)
        .filter(Optional::isPresent)
        .collect(Collectors.toList());
    switch (engineFactories.size()) {
        case 0:
            return new InternalEngineFactory();
        case 1:
            assert engineFactories.get(0).isPresent();
            return engineFactories.get(0).get();
        default:
            final String factoryNames = engineFactories
                .stream()
                .map(candidate -> {
                    assert candidate.isPresent();
                    return "[" + candidate.get().getClass().getName() + "]";
                })
                .collect(Collectors.joining(","));
            throw new IllegalStateException(String.format(
                Locale.ROOT,
                "multiple engine factories provided for %s: %s",
                indexMetadata.getIndex(),
                factoryNames));
    }
}
}
| crate/crate | server/src/main/java/org/elasticsearch/index/shard/IndexShard.java |
package cn.hutool.core.codec;
import java.util.Arrays;
/**
* Base32 - encodes and decodes RFC4648 Base32 (see https://datatracker.ietf.org/doc/html/rfc4648#section-6 )<br>
* base32就是用32(2的5次方)个特定ASCII码来表示256个ASCII码。<br>
* 所以,5个ASCII字符经过base32编码后会变为8个字符(公约数为40),长度增加3/5.不足8n用“=”补足。<br>
* 根据RFC4648 Base32规范,支持两种模式:
* <ul>
* <li>Base 32 Alphabet (ABCDEFGHIJKLMNOPQRSTUVWXYZ234567)</li>
* <li>"Extended Hex" Base 32 Alphabet (0123456789ABCDEFGHIJKLMNOPQRSTUV)</li>
* </ul>
*
* @author Looly
* @since 5.8.0
*/
public class Base32Codec implements Encoder<byte[], String>, Decoder<CharSequence, byte[]> {

    /** Shared stateless instance; declared {@code final} so the public singleton cannot be reassigned. */
    public static final Base32Codec INSTANCE = new Base32Codec();

    @Override
    public String encode(byte[] data) {
        return encode(data, false);
    }

    /**
     * Encodes the given bytes as Base32.
     *
     * @param data   raw bytes to encode
     * @param useHex whether to use the "Extended Hex" alphabet instead of the standard one
     * @return the Base32-encoded string
     */
    public String encode(byte[] data, boolean useHex) {
        final Base32Encoder encoder = useHex ? Base32Encoder.HEX_ENCODER : Base32Encoder.ENCODER;
        return encoder.encode(data);
    }

    @Override
    public byte[] decode(CharSequence encoded) {
        return decode(encoded, false);
    }

    /**
     * Decodes a Base32 string.
     *
     * @param encoded Base32 text
     * @param useHex  whether the input uses the "Extended Hex" alphabet
     * @return the decoded bytes
     */
    public byte[] decode(CharSequence encoded, boolean useHex) {
        final Base32Decoder decoder = useHex ? Base32Decoder.HEX_DECODER : Base32Decoder.DECODER;
        return decoder.decode(encoded);
    }

    /**
     * Base32 encoder (RFC 4648 section 6).
     */
    public static class Base32Encoder implements Encoder<byte[], String> {
        private static final String DEFAULT_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";
        private static final String HEX_ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUV";
        private static final Character DEFAULT_PAD = '=';
        // Indexed by (data.length * 8) % 5; adjusts the padded output length for the leftover bits.
        private static final int[] BASE32_FILL = {-1, 4, 1, 6, 3};

        public static final Base32Encoder ENCODER = new Base32Encoder(DEFAULT_ALPHABET, DEFAULT_PAD);
        public static final Base32Encoder HEX_ENCODER = new Base32Encoder(HEX_ALPHABET, DEFAULT_PAD);

        private final char[] alphabet;
        private final Character pad;

        /**
         * Constructor.
         *
         * @param alphabet custom encoding alphabet, see {@link #DEFAULT_ALPHABET} and {@link #HEX_ALPHABET}
         * @param pad      padding character, or {@code null} for no padding
         */
        public Base32Encoder(String alphabet, Character pad) {
            this.alphabet = alphabet.toCharArray();
            this.pad = pad;
        }

        @Override
        public String encode(byte[] data) {
            int i = 0;
            int index = 0; // bit offset within the current input byte
            int digit;     // next 5-bit group, used as an index into the alphabet
            int currByte;
            int nextByte;
            // Total output length including '=' padding (multiple of 8 for non-empty input).
            int encodeLen = data.length * 8 / 5;
            if (encodeLen != 0) {
                encodeLen = encodeLen + 1 + BASE32_FILL[(data.length * 8) % 5];
            }
            StringBuilder base32 = new StringBuilder(encodeLen);
            while (i < data.length) {
                // unsign
                currByte = (data[i] >= 0) ? data[i] : (data[i] + 256);
                /* Is the current digit going to span a byte boundary? */
                if (index > 3) {
                    if ((i + 1) < data.length) {
                        nextByte = (data[i + 1] >= 0) ? data[i + 1] : (data[i + 1] + 256);
                    } else {
                        nextByte = 0;
                    }
                    digit = currByte & (0xFF >> index);
                    index = (index + 5) % 8;
                    digit <<= index;
                    digit |= nextByte >> (8 - index);
                    i++;
                } else {
                    digit = (currByte >> (8 - (index + 5))) & 0x1F;
                    index = (index + 5) % 8;
                    if (index == 0) {
                        i++;
                    }
                }
                base32.append(alphabet[digit]);
            }
            if (null != pad) {
                // Pad the tail with '=' up to the full encoded length.
                while (base32.length() < encodeLen) {
                    base32.append(pad.charValue());
                }
            }
            return base32.toString();
        }
    }

    /**
     * Base32 decoder.
     */
    public static class Base32Decoder implements Decoder<CharSequence, byte[]> {
        private static final char BASE_CHAR = '0';

        public static final Base32Decoder DECODER = new Base32Decoder(Base32Encoder.DEFAULT_ALPHABET);
        public static final Base32Decoder HEX_DECODER = new Base32Decoder(Base32Encoder.HEX_ALPHABET);

        // Maps (char - '0') to its 5-bit value; -1 marks characters outside the alphabet.
        private final byte[] lookupTable;

        /**
         * Constructor.
         *
         * @param alphabet decoding alphabet
         */
        public Base32Decoder(String alphabet) {
            lookupTable = new byte[128];
            Arrays.fill(lookupTable, (byte) -1);
            final int length = alphabet.length();
            char c;
            for (int i = 0; i < length; i++) {
                c = alphabet.charAt(i);
                lookupTable[c - BASE_CHAR] = (byte) i;
                // Also accept lowercase letters when decoding.
                if (c >= 'A' && c <= 'Z') {
                    lookupTable[Character.toLowerCase(c) - BASE_CHAR] = (byte) i;
                }
            }
        }

        @Override
        public byte[] decode(CharSequence encoded) {
            int i, index, lookup, offset, digit;
            final String base32 = encoded.toString();
            // Padding (if present) marks the end of real data; compute output size from the data portion.
            int len = base32.endsWith("=") ? base32.indexOf("=") * 5 / 8 : base32.length() * 5 / 8;
            byte[] bytes = new byte[len];
            for (i = 0, index = 0, offset = 0; i < base32.length(); i++) {
                lookup = base32.charAt(i) - BASE_CHAR;
                /* Skip chars outside the lookup table */
                if (lookup < 0 || lookup >= lookupTable.length) {
                    continue;
                }
                digit = lookupTable[lookup];
                /* If this digit is not in the table, ignore it */
                if (digit < 0) {
                    continue;
                }
                if (index <= 3) {
                    index = (index + 5) % 8;
                    if (index == 0) {
                        bytes[offset] |= digit;
                        offset++;
                        if (offset >= bytes.length) {
                            break;
                        }
                    } else {
                        bytes[offset] |= digit << (8 - index);
                    }
                } else {
                    // The 5-bit group spans two output bytes.
                    index = (index + 5) % 8;
                    bytes[offset] |= (digit >>> index);
                    offset++;
                    if (offset >= bytes.length) {
                        break;
                    }
                    bytes[offset] |= digit << (8 - index);
                }
            }
            return bytes;
        }
    }
}
| dromara/hutool | hutool-core/src/main/java/cn/hutool/core/codec/Base32Codec.java |
/*
* Copyright 2024 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.pdf.viewer.password;
import android.app.AlertDialog;
import android.app.Dialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.DialogInterface.OnShowListener;
import android.graphics.PorterDuff;
import android.os.Bundle;
import android.text.Editable;
import android.text.TextWatcher;
import android.view.KeyEvent;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.View.OnKeyListener;
import android.view.WindowManager.LayoutParams;
import android.view.inputmethod.EditorInfo;
import android.view.inputmethod.InputMethodManager;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import android.widget.TextView.OnEditorActionListener;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RestrictTo;
import androidx.fragment.app.DialogFragment;
import androidx.pdf.R;
import androidx.pdf.util.Accessibility;
/**
* Dialog for querying password for a protected file. The dialog has 2 buttons:
* <ul>
* <li>Exit, exits the application,
* <li>Open, tries to open the document with the given password. If this is not successful, the
* dialog stays up, and offers to try again (the controller should call {@link #retry}).
* If successful, the controller should call {@link #dismiss}.
* </ul>
* <p>
*/
@RestrictTo(RestrictTo.Scope.LIBRARY)
@SuppressWarnings("deprecation")
public abstract class PasswordDialog extends DialogFragment {
    // Colors resolved from resources in onStart(); used to flip field/label between normal and error state.
    private int mTextDefaultColor;
    private int mBlueColor;
    private int mTextErrorColor;
    private AlertDialog mPasswordDialog;
    // True while the last attempted password is known to be wrong (error styling is showing).
    private boolean mIncorrect;
    // When true, cancelling the dialog finishes the hosting activity instead of just dismissing.
    private boolean mFinishOnCancel;

    /**
     * @param finishOnCancel being true indicates that the activity will be killed when the user
     *                       presses the cancel button on this dialog.
     */
    public void setFinishOnCancel(boolean finishOnCancel) {
        this.mFinishOnCancel = finishOnCancel;
    }

    @NonNull
    @Override
    public Dialog onCreateDialog(@Nullable Bundle savedInstanceState) {
        AlertDialog.Builder builder = new AlertDialog.Builder(getActivity());
        LayoutInflater inflater = getActivity().getLayoutInflater();
        View view = inflater.inflate(R.layout.dialog_password, null);
        builder.setTitle(R.string.title_dialog_password)
                .setView(view)
                .setPositiveButton(R.string.button_open, null)
                .setNegativeButton(R.string.button_cancel, null);
        final AlertDialog dialog = builder.create();
        // Show the soft keyboard as soon as the dialog appears.
        dialog.getWindow().setSoftInputMode(LayoutParams.SOFT_INPUT_STATE_ALWAYS_VISIBLE);
        final EditText passwordField = (EditText) view.findViewById(R.id.password);
        setupPasswordField(passwordField);
        // Hijack the positive button to NOT dismiss the dialog immediately.
        dialog.setOnShowListener(
                new OnShowListener() {
                    @Override
                    public void onShow(DialogInterface useless) {
                        // TODO: Track password prompt displayed.
                        final Button open = dialog.getButton(AlertDialog.BUTTON_POSITIVE);
                        final Button exit = dialog.getButton(AlertDialog.BUTTON_NEGATIVE);
                        open.setOnClickListener(
                                new OnClickListener() {
                                    @Override
                                    public void onClick(View v) {
                                        sendPassword(passwordField);
                                        // TODO: Track password prompt opened.
                                    }
                                });
                        exit.setOnClickListener(
                                new OnClickListener() {
                                    @Override
                                    public void onClick(View v) {
                                        dialog.cancel();
                                        // TODO: Track password prompt exit.
                                    }
                                });
                        // Clear red patches on new text
                        passwordField.addTextChangedListener(
                                new TextWatcher() {
                                    @Override
                                    public void onTextChanged(CharSequence s, int start, int before,
                                            int count) {
                                        if (mIncorrect) {
                                            clearIncorrect();
                                        }
                                    }

                                    @Override
                                    public void beforeTextChanged(CharSequence s, int start,
                                            int count, int after) {
                                    }

                                    @Override
                                    public void afterTextChanged(Editable s) {
                                    }
                                });
                    }
                });
        mPasswordDialog = dialog;
        return dialog;
    }

    /** Configures focus, IME options, and Enter/Done handling so each submits the password. */
    private void setupPasswordField(final EditText passwordField) {
        passwordField.setFocusable(true);
        passwordField.requestFocus();
        // Do not expand the text field to full screen when in landscape.
        passwordField.setImeOptions(EditorInfo.IME_ACTION_DONE | EditorInfo.IME_FLAG_NO_EXTRACT_UI);
        // Set the open button text with title case.
        String openText = getResources().getString(R.string.button_open);
        passwordField.setImeActionLabel(openText, EditorInfo.IME_ACTION_DONE);
        // Handle 'Enter'
        passwordField.setOnKeyListener(new OnKeyListener() {
            @Override
            public boolean onKey(View v, int keyCode, KeyEvent event) {
                if (keyCode == KeyEvent.KEYCODE_ENTER) {
                    sendPassword(passwordField);
                    return true;
                }
                return false;
            }
        });
        // Handle soft keyboard "Done" button.
        passwordField.setOnEditorActionListener(new OnEditorActionListener() {
            @Override
            public boolean onEditorAction(TextView v, int actionId, KeyEvent event) {
                if (actionId == EditorInfo.IME_ACTION_DONE) {
                    sendPassword(passwordField);
                    return true;
                }
                return false;
            }
        });
    }

    @Override
    public void onStart() {
        super.onStart();
        // Resolve theme colors and tint the dialog's field and buttons.
        mTextDefaultColor = getResources().getColor(R.color.text_default);
        mTextErrorColor = getResources().getColor(R.color.text_error);
        mBlueColor = getResources().getColor(R.color.google_blue);
        EditText textField = (EditText) getDialog().findViewById(R.id.password);
        textField.getBackground().setColorFilter(mBlueColor, PorterDuff.Mode.SRC_ATOP);
        mPasswordDialog.getButton(AlertDialog.BUTTON_NEGATIVE).setTextColor(mBlueColor);
        mPasswordDialog.getButton(AlertDialog.BUTTON_POSITIVE).setTextColor(mBlueColor);
        showSoftKeyboard(textField);
    }

    private void showSoftKeyboard(View view) {
        if (view.requestFocus()) {
            InputMethodManager imm = (InputMethodManager)
                    getActivity().getSystemService(Context.INPUT_METHOD_SERVICE);
            imm.showSoftInput(view, InputMethodManager.SHOW_IMPLICIT);
        }
    }

    @Override
    public void onCancel(DialogInterface dialog) {
        // Either finish the whole activity or dismiss and let the subclass surface an error.
        if (mFinishOnCancel) {
            getActivity().finish();
        } else {
            dismiss();
            showErrorOnDialogCancel();
        }
    }

    /** Set the password input by the user. */
    public abstract void sendPassword(EditText textField);

    /** Show error when user cancels password prompt dialog. */
    public abstract void showErrorOnDialogCancel();

    /** The given password didn't work, perhaps try again? */
    public void retry() {
        // TODO: Track incorrect password input.
        mIncorrect = true;
        EditText textField = (EditText) getDialog().findViewById(R.id.password);
        textField.selectAll();
        // Switch to the plain background before applying the red error tint.
        swapBackground(textField, false);
        textField.getBackground().setColorFilter(mTextErrorColor, PorterDuff.Mode.SRC_ATOP);
        TextView label = (TextView) getDialog().findViewById(R.id.label);
        label.setText(R.string.label_password_incorrect);
        label.setTextColor(mTextErrorColor);
        Accessibility.get().announce(getActivity(), getDialog().getCurrentFocus(),
                R.string.desc_password_incorrect_message);
        getDialog().findViewById(R.id.password_alert).setVisibility(View.VISIBLE);
    }

    /** Restores the default label text/color and field background after an error. */
    private void clearIncorrect() {
        mIncorrect = false;
        TextView label = (TextView) getDialog().findViewById(R.id.label);
        label.setText(R.string.label_password_first);
        label.setTextColor(mTextDefaultColor);
        EditText textField = (EditText) getDialog().findViewById(R.id.password);
        swapBackground(textField, true);
        getDialog().findViewById(R.id.password_alert).setVisibility(View.GONE);
    }

    // reverse == false: plain drawable (for error tinting); reverse == true: a fresh EditText's default background.
    private void swapBackground(EditText textField, boolean reverse) {
        if (!reverse) {
            textField.setBackground(
                    getResources().getDrawable(R.drawable.textfield_default_mtrl_alpha));
        } else {
            EditText sample = new EditText(getActivity());
            textField.setBackground(sample.getBackground());
        }
    }
}
| androidx/androidx | pdf/pdf-viewer/src/main/java/androidx/pdf/viewer/password/PasswordDialog.java |
package replicatorg.drivers.gen3;
import java.util.EnumMap;
import java.util.Map;
import java.util.Vector;
import java.util.logging.Level;
import java.util.Arrays;
import org.w3c.dom.Element;
import replicatorg.app.Base;
import replicatorg.drivers.RetryException;
import replicatorg.machine.model.AxisId;
import replicatorg.machine.model.MachineModel;
import replicatorg.machine.model.ToolModel;
import replicatorg.util.Point5d;
import replicatorg.drivers.Version;
public class Makerbot4GDriver extends Sanguino3GDriver {
private boolean accelerationEnabled = false;
private boolean stepperExtruderFanEnabled = false;
public Makerbot4GDriver() {
    super();
    // This will be overridden by the MightyBoard driver when it extends this class
    minimumAccelerationVersion = new Version(3,2);
    minimumJettyAccelerationVersion = new Version(3,2);
}
/** @return this driver's identifying name. */
public String getDriverName() {
    return "Makerbot4G";
}
/**
 * @return true if the connected firmware version is at least the minimum version
 *         that supports acceleration.
 */
@Override
public boolean hasAcceleration() {
    // Direct boolean return instead of if/return-true/return-false.
    return version.compareTo(getMinimumAccelerationVersion()) >= 0;
}
/**
 * @return true if the firmware meets the Jetty minimum version AND general
 *         acceleration support is available.
 */
@Override
public boolean hasJettyAcceleration() {
    // Equivalent to: if below the Jetty minimum return false, else defer to hasAcceleration().
    return version.compareTo(getMinimumJettyAccelerationVersion()) >= 0 && hasAcceleration();
}
public void reset() {
    // We should poll the machine for its state here, but it is more important to have the
    // fan on than off, so assume the fan is off after a reset.
    stepperExtruderFanEnabled = false;
    super.reset();
}
public void stop(boolean abort) {
    // Record the toolstate as off, so we don't excite the extruder motor in future moves.
    machine.currentTool().disableMotor();
    // We should stop the fan here, but it will be stopped for us by the super.
    stepperExtruderFanEnabled = false;
    super.stop(abort);
}
/**
 * Returns the hijacked axes for the given toolhead: its motor stepper axis, if that axis
 * appears in {@code extruderHijackedMap}; otherwise an empty collection.
 */
private Iterable<AxisId> getHijackedAxes(int toolhead){
    Vector<AxisId> axes = new Vector<AxisId>();
    AxisId toolheadAxis = machine.getTool(toolhead).getMotorStepperAxis();
    if( extruderHijackedMap.containsKey( toolheadAxis ) )
        axes.add(toolheadAxis);
    return axes;
}
// /**
// * Returns the hijacked axes for the current tool.
// */
// @Deprecated
// private Iterable<AxisId> getHijackedAxes() {
// Vector<AxisId> axes = new Vector<AxisId>();
//
// for ( Map.Entry<AxisId,ToolModel> entry : stepExtruderMap.entrySet()) {
// ToolModel curTool = machine.currentTool();
// if (curTool.equals(entry.getValue())) {
// axes.add(curTool.getMotorStepperAxis());
// }
// }
// return axes;
// }
/**
 * Returns the hijacked axes for all tools.
 */
private Iterable<AxisId> getAllHijackedAxes() {
    // Only the keys (axes) are needed; no reason to iterate the entry set and discard the values.
    return new Vector<AxisId>(extruderHijackedMap.keySet());
}
/** relies on currentTool too much **/
@Deprecated
protected void queueAbsolutePoint(Point5d steps, long micros) throws RetryException {
    // Turn on the extruder-stepper fan if any hijacked axis actually moves in this point.
    int toolhead = machine.currentTool().getIndex();
    for (AxisId axis : getHijackedAxes(toolhead)) {
        if (steps.axis(axis) != 0) {
            enableStepperExtruderFan(true,toolhead);
        }
    }
    PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.QUEUE_POINT_EXT.getCode());
    if (Base.logger.isLoggable(Level.FINE)) {
        Base.logger.log(Level.FINE,"Queued absolute point " + steps + " at "
                + Long.toString(micros) + " usec.");
    }
    // Payload: x, y, z, a, b step counts followed by the move duration in microseconds.
    pb.add32((int) steps.x());
    pb.add32((int) steps.y());
    pb.add32((int) steps.z());
    pb.add32((int) steps.a());
    pb.add32((int) steps.b());
    pb.add32((int) micros);
    runCommand(pb.getPacket());
}
/**
 * Tells the machine that its current physical position is {@code p} (converted to steps)
 * and mirrors that position into the local {@code currentPosition}.
 */
public void setCurrentPosition(Point5d p) throws RetryException {
    PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.SET_POSITION_EXT.getCode());
    Point5d steps = machine.mmToSteps(p);
    pb.add32((long) steps.x());
    pb.add32((long) steps.y());
    pb.add32((long) steps.z());
    pb.add32((long) steps.a());
    pb.add32((long) steps.b());
    Base.logger.log(Level.FINE,"Set current position to " + p + " (" + steps
            + ")");
    runCommand(pb.getPacket());
    // Set the current position explicitly instead of calling the super, to avoid sending the current position command twice.
    currentPosition.set(p);
    // super.setCurrentPosition(p);
}
/**
 * Queries the firmware for its current position and converts it to mm.
 *
 * @return the machine position in mm, or {@code null} when the position
 *         cannot be known (writing to a file) or the response is invalid.
 */
protected Point5d reconcilePosition() {
// If we're writing to a file, we can't actually know what the current position is.
if (fileCaptureOstream != null) {
return null;
}
PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.GET_POSITION_EXT.getCode());
PacketResponse pr = runQuery(pb.getPacket());
Point5d steps;
try {
// Response payload: five consecutive 32-bit step counts (x, y, z, a, b).
steps = new Point5d(pr.get32(), pr.get32(), pr.get32(), pr.get32(), pr.get32());
} catch(NullPointerException npe) {
// A malformed/absent response surfaces as an NPE from get32().
Base.logger.log(Level.FINEST, "Invalid response packet");
return null;
}
return machine.stepsToMM(steps);
}
/**
 * Overridden so we do not talk to the DC motor driver; on this machine that
 * driver output is reused for the stepper-extruder cooling fan.
 */
public void enableMotor() throws RetryException {
	Base.logger.fine("MakerBot4G.enableMotor()");//REMOVE
	machine.currentTool().enableMotor();
}
/**
 * Overridden so we do not talk to the DC motor driver; on this machine that
 * driver output is reused for the stepper-extruder cooling fan.
 */
public void disableMotor() throws RetryException {
	// Fixed: previously logged "MakerBot4G.enableMotor()" from this method.
	Base.logger.fine("MakerBot4G.disableMotor()"); //REMOVE
	machine.currentTool().disableMotor();
}
/**
 * Overridden so we do not talk to the DC motor driver; on this machine that
 * driver output is reused for the stepper-extruder cooling fan.
 */
public void setMotorSpeedPWM(int pwm) throws RetryException {
	// Forward straight to the current tool's model.
	machine.currentTool().setMotorSpeedPWM(pwm);
}
/**
 * Overridden so we do not talk to the DC motor driver; on this machine that
 * driver output is reused for the stepper-extruder cooling fan.
 * A toolhead index of -1 selects the current tool.
 */
public void setMotorRPM(double rpm, int toolhead) throws RetryException {
	ToolModel target = (toolhead == -1) ? machine.currentTool() : machine.getTool(toolhead);
	target.setMotorSpeedRPM(rpm);
}
/** Enables the axis drives, first switching on the stepper-extruder fan. */
public void enableDrives() throws RetryException {
	enableStepperExtruderFan(true, machine.currentTool().getIndex());
	super.enableDrives();
}
/** Disables the axis drives, first switching off the stepper-extruder fan. */
public void disableDrives() throws RetryException {
	enableStepperExtruderFan(false, machine.currentTool().getIndex());
	super.disableDrives();
}
/**
 * Due to async command dispatch, this version should not be called.
 *
 * @deprecated use {@code enableStepperExtruderFan(boolean, int)} with an
 *             explicit tool index instead; this overload falls back to
 *             the current tool.
 */
@Deprecated
public void enableStepperExtruderFan(boolean enabled) throws RetryException {
enableStepperExtruderFan(enabled, machine.currentTool().getIndex());
}
/**
 * Will turn on/off the stepper extruder fan if it's not already in the correct state.
 * Sends two TOOL_COMMAND packets: TOGGLE_MOTOR_1 with the enable/direction
 * flags, then SET_MOTOR_1_PWM at full power.
 *
 * @param enabled   desired fan state
 * @param toolIndex tool the commands are addressed to
 * @throws RetryException if a command must be retried
 */
public void enableStepperExtruderFan(boolean enabled, int toolIndex) throws RetryException {
// Skip the serial traffic when the cached state already matches.
if (this.stepperExtruderFanEnabled == enabled) return;
// FIXME: Should be called per hijacked axis with the correct tool
// our flag variable starts with motors enabled.
// bit 0: enable; bit 1: direction (always set).
byte flags = (byte) (enabled ? 1 : 0);
// bit 1 determines direction...
flags |= 2;
Base.logger.log(Level.FINE,"Stepper Extruder fan w/flags: "
+ Integer.toBinaryString(flags));
// send it!
// Packet 1: toggle the motor-1 output with the flag byte as payload.
PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.TOOL_COMMAND.getCode());
pb.add8((byte) toolIndex);
pb.add8(ToolCommandCode.TOGGLE_MOTOR_1.getCode());
pb.add8((byte) 1); // payload length
pb.add8(flags);
runCommand(pb.getPacket());
// Always use max PWM
// Packet 2: set motor-1 PWM to 255 (full power).
pb = new PacketBuilder(MotherboardCommandCode.TOOL_COMMAND.getCode());
pb.add8((byte) toolIndex);
pb.add8(ToolCommandCode.SET_MOTOR_1_PWM.getCode());
pb.add8((byte) 1); // length of payload.
pb.add8((byte) 255);
runCommand(pb.getPacket());
// Cache the new state so redundant packets are suppressed next time.
this.stepperExtruderFanEnabled = enabled;
}
EnumMap<AxisId,ToolModel> extruderHijackedMap = new EnumMap<AxisId,ToolModel>(AxisId.class);
/**
 * When the machine is set for this driver, some toolheads may poach an
 * extrusion axis: each tool whose XML carries a "stepper_axis" attribute
 * claims that axis in extruderHijackedMap.
 */
@Override
public void setMachine(MachineModel m) {
	super.setMachine(m);
	for (ToolModel tm : m.getTools()) {
		Element e = (Element) tm.getXml();
		if (!e.hasAttribute("stepper_axis")) {
			continue;
		}
		final String stepAxisStr = e.getAttribute("stepper_axis");
		try {
			AxisId axis = AxisId.valueOf(stepAxisStr.toUpperCase());
			if (!m.hasAxis(axis)) {
				Base.logger.severe("Tool claims unavailable axis "+axis.name());
			} else {
				// Record the tool that owns this axis. NOTE: unlike the 4ga
				// driver, the axis is not removed from the machine's
				// available-axis set here.
				extruderHijackedMap.put(axis, tm);
			}
		} catch (IllegalArgumentException iae) {
			Base.logger.severe("Unintelligible axis designator "+stepAxisStr);
		}
	}
}
@Override
public EnumMap<AxisId, String> getAxisAlises() {
	/// Returns the axes that are overridden or hijacked, each mapped to the
	/// name of the tool they are hijacked for.
	EnumMap<AxisId, String> map = new EnumMap<AxisId, String>(AxisId.class);
	for (Map.Entry<AxisId, ToolModel> entry : extruderHijackedMap.entrySet()) {
		map.put(entry.getKey(), entry.getValue().getName());
	}
	return map;
}
/** @return the human-readable machine family name for this driver. */
@Override
public String getMachineType() {
	return "Thing-O-Matic/CupCake CNC";
}
/// Read acceleration OFF/ON status from the bot and cache it in accelerationEnabled.
private void getAccelerationState() {
	Base.logger.fine("Getting Acceleration Status from Bot"); // fixed "Geting" typo
	// Bit 0 of the stepper-driver status byte is the acceleration-enabled flag.
	accelerationEnabled = 0 != (getAccelerationStatus() & (byte) 0x01);
	if (accelerationEnabled) {
		Base.logger.finest("Found accelerated firmware active");
	}
}
/// Looks up a key value based on the machine setting/status.
/// Only used for getting baseline acceleration values for
/// Print-O-Matic. Returns the caller-supplied baseline for unknown keys.
@Override
public String getConfigValue(String key, String baseline)
{
	getAccelerationState();
	// Pick the value set for the firmware mode, then match the key.
	final String feedrate, travel, temp;
	if (accelerationEnabled) {
		feedrate = "80";
		travel = "150";
		temp = "240";
	} else {
		feedrate = "40";
		travel = "55";
		temp = "220";
	}
	if (key.equals("desiredFeedrate")) return feedrate;
	if (key.equals("travelFeedrate")) return travel;
	if (key.equals("printTemp")) return temp;
	return baseline;
}
/// read a 32 bit int from EEPROM at location 'offset'
/// NOTE: The equivalent routine in MightyBoard.java fails for a negative-valued integer
private int readInt32FromEEPROM(int offset)
{
byte[] r = readFromEEPROM(offset, 4);
if( r == null || r.length < 4) {
Base.logger.severe("invalid read from read32FromEEPROM at "+ offset);
return 0;
}
// Assemble the low three bytes little-endian.
int val = (int)r[0] & 0xff;
val += ((int)r[1] & 0xff) << 8;
val += ((int)r[2] & 0xff) << 16;
// Top byte: add only its 7 value bits, then negate if its sign bit was set --
// i.e. sign-magnitude decoding, NOT two's complement.
// NOTE(review): writeInt32ToEEPROM() below emits plain little-endian two's
// complement, so negative values do not round-trip through this pair (e.g.
// writing -1 reads back as -2147483647). Confirm which encoding the firmware
// actually uses before changing either side.
val += ((int)r[3] & 0x7f) << 24;
if (r[3] < 0)
val = -val;
return val;
}
/// read a 32 bit unsigned int from EEPROM at location 'offset'
/// (returned in a long since Java ints are signed)
private long readUInt32FromEEPROM(int offset)
{
	byte[] raw = readFromEEPROM(offset, 4);
	if (raw == null || raw.length < 4) {
		Base.logger.severe("invalid read from read32FromEEPROM at "+ offset);
		return 0;
	}
	// Fold the four bytes together little-endian, most significant first.
	long result = 0L;
	for (int i = 3; i >= 0; i--) {
		result = (result << 8) | ((long) raw[i] & 0xffL);
	}
	return result;
}
/// Write a 32 bit int to EEPROM at 'offset', least significant byte first.
private void writeInt32ToEEPROM(int offset, int value) {
	byte[] payload = new byte[4];
	for (int i = 0; i < 4; i++) {
		payload[i] = (byte) (value >>> (8 * i));
	}
	writeToEEPROM(offset, payload);
}
/// Write a 32 bit unsigned value (carried in a long) to EEPROM at 'offset',
/// clamping it into [0, 0xffffffff] first.
private void writeUInt32ToEEPROM(int offset, long value) {
	// WARNING: the L suffixes matter -- a bare 0xffffffff is the int -1.
	long clamped = Math.max(0L, Math.min(value, 0xffffffffL));
	writeInt32ToEEPROM(offset, (int) clamped);
}
/// Get a stored unsigned 8bit int from EEPROM.
/// Java bytes are signed, so mask with 0xff to widen into 0..255
/// without sign extension.
private int getUInt8EEPROM(int offset) {
	byte[] val = readFromEEPROM(offset, 1);
	return val[0] & 0xff;
}
/// Write an unsigned 8bit value to EEPROM.
/// The int's sign bit is ignored (the value is treated as unsigned), and
/// anything above 0xff is clamped to 0xff -- so a temp of 256 stores 255
/// rather than wrapping to 0.
private void setUInt8EEPROM(int offset, int val) {
	byte[] b = { (byte) (0xff & Math.min(val, 0xff)) };
	writeToEEPROM(offset, b);
}
/// Get a stored 32bit unsigned int from EEPROM
/// (thin wrapper kept for symmetry with the other EEPROM accessors).
private long getUInt32EEPROM(int offset) {
return readUInt32FromEEPROM(offset);
}
/// Store a 32bit unsigned int to EEPROM
/// (thin wrapper kept for symmetry with the other EEPROM accessors).
private void setUInt32EEPROM(int offset, long val) {
writeUInt32ToEEPROM(offset, val);
}
// get stored acceleration status:
// bit 0: OFF (0) or ON (1)
// bit 1: without planner (0) or with planner (1)
// bit 2: unstrangled (0) or strangled (1)
@Override
public byte getAccelerationStatus(){
	byte[] val = readFromEEPROM(JettyG3EEPROM.STEPPER_DRIVER, 1);
	// Guard against a failed read, as the other EEPROM readers in this class
	// do; 0 is the safe default (acceleration off).
	if (val == null || val.length < 1) {
		Base.logger.severe("invalid read of acceleration status");
		return 0;
	}
	return val[0];
}
@Override
// set stored acceleration status
// acceleration is applied to all moves, except homing when ON
public void setAccelerationStatus(byte status){
	// Only the low three bits are presently meaningful.
	status &= (byte) 0x07;
	// If acceleration itself (bit 0) is off, the planner/strangle bits are
	// moot: force the whole byte to zero. (May not be the best idea.)
	if ((status & (byte) 0x01) == (byte) 0) {
		status = (byte) 0;
	}
	writeToEEPROM(JettyG3EEPROM.STEPPER_DRIVER, new byte[] { status });
}
// Unhandled: FILAMENT_USED
// Unhandled: FILAMENT_USED_TRIP
// Unhandled: STEPS_PER_MM_A
// Unhandled: STEPS_PER_MM_B
// Unhandled: AXIS_HOME_POSITIONS
// Unhandled: STEPS_PER_MM_Y
// Unhandled: STEPS_PER_MM_X
// Unhandled: STEPS_PER_MM_Z
// Unhandled: MACHINE_NAME
// The "Int" EEPROM parameters are actually uint8_t (aka, unsigned char)
// There's no useful equivalent in Java so we promote these to Int
@Override
/**
 * Reads one uint8_t firmware parameter from EEPROM and widens it to int
 * (Java has no unsigned byte). Unknown parameters log a warning and
 * return 0. The mapping below is a straight parameter-to-EEPROM-offset
 * dispatch table; keep the entries in sync with JettyG3EEPROM.
 */
public int getEEPROMParamInt(EEPROMParams param) {
switch (param) {
case ABP_COPIES : return getUInt8EEPROM(JettyG3EEPROM.ABP_COPIES);
case AXIS_INVERSION : return getUInt8EEPROM(JettyG3EEPROM.AXIS_INVERSION);
case BUZZER_REPEATS : return getUInt8EEPROM(JettyG3EEPROM.BUZZER_REPEATS);
case ENDSTOPS_USED : return getUInt8EEPROM(JettyG3EEPROM.ENDSTOPS_USED);
case ENDSTOP_INVERSION : return getUInt8EEPROM(JettyG3EEPROM.ENDSTOP_INVERSION);
case ESTOP_CONFIGURATION : return getUInt8EEPROM(JettyG3EEPROM.ESTOP_CONFIGURATION);
case EXTRUDE_DURATION : return getUInt8EEPROM(JettyG3EEPROM.EXTRUDE_DURATION);
case EXTRUDE_MMS : return getUInt8EEPROM(JettyG3EEPROM.EXTRUDE_MMS);
case INVERTED_EXTRUDER_5D : return getUInt8EEPROM(JettyG3EEPROM.INVERTED_EXTRUDER_5D);
case JOG_MODE_SETTINGS : return getUInt8EEPROM(JettyG3EEPROM.JOG_MODE_SETTINGS);
case LCD_TYPE : return getUInt8EEPROM(JettyG3EEPROM.LCD_TYPE);
case MOOD_LIGHT_CUSTOM_BLUE : return getUInt8EEPROM(JettyG3EEPROM.MOOD_LIGHT_CUSTOM_BLUE);
case MOOD_LIGHT_CUSTOM_GREEN : return getUInt8EEPROM(JettyG3EEPROM.MOOD_LIGHT_CUSTOM_GREEN);
case MOOD_LIGHT_CUSTOM_RED : return getUInt8EEPROM(JettyG3EEPROM.MOOD_LIGHT_CUSTOM_RED);
case MOOD_LIGHT_SCRIPT : return getUInt8EEPROM(JettyG3EEPROM.MOOD_LIGHT_SCRIPT);
case OVERRIDE_GCODE_TEMP : return getUInt8EEPROM(JettyG3EEPROM.OVERRIDE_GCODE_TEMP);
case PLATFORM_TEMP : return getUInt8EEPROM(JettyG3EEPROM.PLATFORM_TEMP);
case PREHEAT_DURING_ESTIMATE : return getUInt8EEPROM(JettyG3EEPROM.PREHEAT_DURING_ESTIMATE);
case STEPPER_DRIVER : return getUInt8EEPROM(JettyG3EEPROM.STEPPER_DRIVER);
case TOOL0_TEMP : return getUInt8EEPROM(JettyG3EEPROM.TOOL0_TEMP);
case TOOL1_TEMP : return getUInt8EEPROM(JettyG3EEPROM.TOOL1_TEMP);
case VERSION_HIGH : return getUInt8EEPROM(JettyG3EEPROM.VERSION_HIGH);
case VERSION_LOW : return getUInt8EEPROM(JettyG3EEPROM.VERSION_LOW);
default :
Base.logger.log(Level.WARNING, "getEEPROMParamInt(" + param + ") call failed");
return 0;
}
}
@Override
/**
 * Reads one 32-bit unsigned firmware parameter from EEPROM, returned in a
 * long (Java has no unsigned int). Unknown parameters log a warning and
 * return 0L. Straight parameter-to-offset dispatch table.
 */
public long getEEPROMParamUInt(EEPROMParams param) {
switch (param) {
case ACCEL_CLOCKWISE_EXTRUDER : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_CLOCKWISE_EXTRUDER);
case ACCEL_MAX_ACCELERATION_A : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_ACCELERATION_A);
case ACCEL_MAX_ACCELERATION_X : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_ACCELERATION_X);
case ACCEL_MAX_ACCELERATION_Y : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_ACCELERATION_Y);
case ACCEL_MAX_ACCELERATION_Z : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_ACCELERATION_Z);
case ACCEL_MAX_EXTRUDER_NORM : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_EXTRUDER_NORM);
case ACCEL_MAX_EXTRUDER_RETRACT : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_EXTRUDER_RETRACT);
case ACCEL_MAX_FEEDRATE_A : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_A);
case ACCEL_MAX_FEEDRATE_B : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_B);
case ACCEL_MAX_FEEDRATE_X : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_X);
case ACCEL_MAX_FEEDRATE_Y : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_Y);
case ACCEL_MAX_FEEDRATE_Z : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_Z);
case ACCEL_MIN_PLANNER_SPEED : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_MIN_PLANNER_SPEED);
case ACCEL_REV_MAX_FEED_RATE : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_REV_MAX_FEED_RATE);
case ACCEL_SLOWDOWN_LIMIT : return getUInt32EEPROM(JettyG3EEPROM.ACCEL_SLOWDOWN_LIMIT);
case HOMING_FEED_RATE_X : return getUInt32EEPROM(JettyG3EEPROM.HOMING_FEED_RATE_X);
case HOMING_FEED_RATE_Y : return getUInt32EEPROM(JettyG3EEPROM.HOMING_FEED_RATE_Y);
case HOMING_FEED_RATE_Z : return getUInt32EEPROM(JettyG3EEPROM.HOMING_FEED_RATE_Z);
case RAM_USAGE_DEBUG : return getUInt32EEPROM(JettyG3EEPROM.RAM_USAGE_DEBUG);
default :
Base.logger.log(Level.WARNING, "getEEPROMParamUInt(" + param + ") call failed");
return 0L;
}
}
@Override
/**
 * Reads a firmware parameter stored in EEPROM as a scaled 32-bit unsigned
 * int and converts it to a double by dividing out the fixed-point scale
 * (100000, 10000, 100 or 10 depending on the parameter -- the divisors must
 * match the multipliers used in setEEPROMParam(EEPROMParams, double)).
 * Unknown parameters log a warning and return 0.
 * NOTE(review): ACCEL_EXTRUDER_DEPRIME_A maps to the offset named
 * ACCEL_EXTRUDER_DEPRIME (no _A suffix) -- presumably intentional; verify
 * against JettyG3EEPROM.
 */
public double getEEPROMParamFloat(EEPROMParams param) {
switch (param) {
case ACCEL_ADVANCE_K : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_ADVANCE_K) / 100000.0d;
case ACCEL_ADVANCE_K2 : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_ADVANCE_K2) / 100000.0d;
case ACCEL_EXTRUDER_DEPRIME_A : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_EXTRUDER_DEPRIME) / 10.0d;
case ACCEL_E_STEPS_PER_MM : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_E_STEPS_PER_MM) / 10.0d;
case ACCEL_MAX_SPEED_CHANGE_A : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_SPEED_CHANGE_A) / 10.0d;
case ACCEL_MAX_SPEED_CHANGE_X : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_SPEED_CHANGE_X) / 10.0d;
case ACCEL_MAX_SPEED_CHANGE_Y : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_SPEED_CHANGE_Y) / 10.0d;
case ACCEL_MAX_SPEED_CHANGE_Z : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_SPEED_CHANGE_Z) / 10.0d;
case ACCEL_MIN_FEED_RATE : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_MIN_FEED_RATE) / 10.0d;
case ACCEL_MIN_SEGMENT_TIME : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_MIN_SEGMENT_TIME) / 10000.0d;
case ACCEL_MIN_TRAVEL_FEED_RATE : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_MIN_TRAVEL_FEED_RATE) / 10.0d;
case ACCEL_NOODLE_DIAMETER : return (double)getUInt32EEPROM(JettyG3EEPROM.ACCEL_NOODLE_DIAMETER) / 100.0d;
default :
Base.logger.log(Level.WARNING, "getEEPROMParamFloat(" + param + ") call failed");
return 0d;
}
}
// Unhandled: FILAMENT_USED
// Unhandled: FILAMENT_USED_TRIP
// Unhandled: STEPS_PER_MM_A
// Unhandled: STEPS_PER_MM_B
// Unhandled: AXIS_HOME_POSITIONS
// Unhandled: STEPS_PER_MM_Y
// Unhandled: STEPS_PER_MM_X
// Unhandled: STEPS_PER_MM_Z
// Unhandled: MACHINE_NAME
@Override
/**
 * Stores one uint8_t firmware parameter to EEPROM. Negative values are
 * clamped to 0 before storage (the byte is unsigned on the firmware side);
 * values above 0xff are clamped to 0xff inside setUInt8EEPROM(). Unknown
 * parameters log a warning. Dispatch table mirrors getEEPROMParamInt().
 */
public void setEEPROMParam(EEPROMParams param, int val) {
if (val < 0)
val = 0;
switch (param) {
case ABP_COPIES : setUInt8EEPROM(JettyG3EEPROM.ABP_COPIES, val); break;
case AXIS_INVERSION : setUInt8EEPROM(JettyG3EEPROM.AXIS_INVERSION, val); break;
case BUZZER_REPEATS : setUInt8EEPROM(JettyG3EEPROM.BUZZER_REPEATS, val); break;
case ENDSTOPS_USED : setUInt8EEPROM(JettyG3EEPROM.ENDSTOPS_USED, val); break;
case ENDSTOP_INVERSION : setUInt8EEPROM(JettyG3EEPROM.ENDSTOP_INVERSION, val); break;
case ESTOP_CONFIGURATION : setUInt8EEPROM(JettyG3EEPROM.ESTOP_CONFIGURATION, val); break;
case EXTRUDE_DURATION : setUInt8EEPROM(JettyG3EEPROM.EXTRUDE_DURATION, val); break;
case EXTRUDE_MMS : setUInt8EEPROM(JettyG3EEPROM.EXTRUDE_MMS, val); break;
case INVERTED_EXTRUDER_5D : setUInt8EEPROM(JettyG3EEPROM.INVERTED_EXTRUDER_5D, val); break;
case JOG_MODE_SETTINGS : setUInt8EEPROM(JettyG3EEPROM.JOG_MODE_SETTINGS, val); break;
case LCD_TYPE : setUInt8EEPROM(JettyG3EEPROM.LCD_TYPE, val); break;
case MOOD_LIGHT_CUSTOM_BLUE : setUInt8EEPROM(JettyG3EEPROM.MOOD_LIGHT_CUSTOM_BLUE, val); break;
case MOOD_LIGHT_CUSTOM_GREEN : setUInt8EEPROM(JettyG3EEPROM.MOOD_LIGHT_CUSTOM_GREEN, val); break;
case MOOD_LIGHT_CUSTOM_RED : setUInt8EEPROM(JettyG3EEPROM.MOOD_LIGHT_CUSTOM_RED, val); break;
case MOOD_LIGHT_SCRIPT : setUInt8EEPROM(JettyG3EEPROM.MOOD_LIGHT_SCRIPT, val); break;
case OVERRIDE_GCODE_TEMP : setUInt8EEPROM(JettyG3EEPROM.OVERRIDE_GCODE_TEMP, val); break;
case PLATFORM_TEMP : setUInt8EEPROM(JettyG3EEPROM.PLATFORM_TEMP, val); break;
case PREHEAT_DURING_ESTIMATE : setUInt8EEPROM(JettyG3EEPROM.PREHEAT_DURING_ESTIMATE, val); break;
case STEPPER_DRIVER : setUInt8EEPROM(JettyG3EEPROM.STEPPER_DRIVER, val); break;
case TOOL0_TEMP : setUInt8EEPROM(JettyG3EEPROM.TOOL0_TEMP, val); break;
case TOOL1_TEMP : setUInt8EEPROM(JettyG3EEPROM.TOOL1_TEMP, val); break;
case VERSION_HIGH : setUInt8EEPROM(JettyG3EEPROM.VERSION_HIGH, val); break;
case VERSION_LOW : setUInt8EEPROM(JettyG3EEPROM.VERSION_LOW, val); break;
default : Base.logger.log(Level.WARNING, "setEEPROMParam(" + param + ", " + val + ") call failed"); break;
}
}
@Override
/**
 * Stores one 32-bit unsigned firmware parameter to EEPROM. Negative values
 * are clamped to 0L (the field is unsigned on the firmware side); the upper
 * clamp to 0xffffffff happens inside writeUInt32ToEEPROM(). Unknown
 * parameters log a warning. Dispatch table mirrors getEEPROMParamUInt().
 */
public void setEEPROMParam(EEPROMParams param, long val) {
if (val < 0L)
val = 0L;
switch (param) {
case ACCEL_CLOCKWISE_EXTRUDER : setUInt32EEPROM(JettyG3EEPROM.ACCEL_CLOCKWISE_EXTRUDER, val); break;
case ACCEL_MAX_ACCELERATION_A : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_ACCELERATION_A, val); break;
case ACCEL_MAX_ACCELERATION_X : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_ACCELERATION_X, val); break;
case ACCEL_MAX_ACCELERATION_Y : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_ACCELERATION_Y, val); break;
case ACCEL_MAX_ACCELERATION_Z : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_ACCELERATION_Z, val); break;
case ACCEL_MAX_EXTRUDER_NORM : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_EXTRUDER_NORM, val); break;
case ACCEL_MAX_EXTRUDER_RETRACT : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_EXTRUDER_RETRACT, val); break;
case ACCEL_MAX_FEEDRATE_A : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_A, val); break;
case ACCEL_MAX_FEEDRATE_B : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_B, val); break;
case ACCEL_MAX_FEEDRATE_X : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_X, val); break;
case ACCEL_MAX_FEEDRATE_Y : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_Y, val); break;
case ACCEL_MAX_FEEDRATE_Z : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_FEEDRATE_Z, val); break;
case ACCEL_MIN_PLANNER_SPEED : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MIN_PLANNER_SPEED, val); break;
case ACCEL_REV_MAX_FEED_RATE : setUInt32EEPROM(JettyG3EEPROM.ACCEL_REV_MAX_FEED_RATE, val); break;
case ACCEL_SLOWDOWN_LIMIT : setUInt32EEPROM(JettyG3EEPROM.ACCEL_SLOWDOWN_LIMIT, val); break;
case HOMING_FEED_RATE_X : setUInt32EEPROM(JettyG3EEPROM.HOMING_FEED_RATE_X, val); break;
case HOMING_FEED_RATE_Y : setUInt32EEPROM(JettyG3EEPROM.HOMING_FEED_RATE_Y, val); break;
case HOMING_FEED_RATE_Z : setUInt32EEPROM(JettyG3EEPROM.HOMING_FEED_RATE_Z, val); break;
case RAM_USAGE_DEBUG : setUInt32EEPROM(JettyG3EEPROM.RAM_USAGE_DEBUG, val); break;
default : Base.logger.log(Level.WARNING, "setEEPROMParam(" + param + ", " + val + ") call failed"); break;
}
}
@Override
/**
 * Stores a floating-point firmware parameter to EEPROM as a scaled 32-bit
 * unsigned int. Negative values are clamped to 0.0 first; the multipliers
 * (100000, 10000, 100 or 10) must match the divisors used in
 * getEEPROMParamFloat(). Unknown parameters log a warning.
 * NOTE(review): ACCEL_EXTRUDER_DEPRIME_A writes to the offset named
 * ACCEL_EXTRUDER_DEPRIME (no _A suffix), matching the read side.
 */
public void setEEPROMParam(EEPROMParams param, double val) {
if (val < 0.0d)
val = 0.0d;
switch (param) {
case ACCEL_ADVANCE_K : setUInt32EEPROM(JettyG3EEPROM.ACCEL_ADVANCE_K, (long)(val * 100000.0d)); break;
case ACCEL_ADVANCE_K2 : setUInt32EEPROM(JettyG3EEPROM.ACCEL_ADVANCE_K2, (long)(val * 100000.0d)); break;
case ACCEL_EXTRUDER_DEPRIME_A : setUInt32EEPROM(JettyG3EEPROM.ACCEL_EXTRUDER_DEPRIME, (long)(val * 10.0d)); break;
case ACCEL_E_STEPS_PER_MM : setUInt32EEPROM(JettyG3EEPROM.ACCEL_E_STEPS_PER_MM, (long)(val * 10.0d)); break;
case ACCEL_MAX_SPEED_CHANGE_A : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_SPEED_CHANGE_A, (long)(val * 10.0d)); break;
case ACCEL_MAX_SPEED_CHANGE_X : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_SPEED_CHANGE_X, (long)(val * 10.0d)); break;
case ACCEL_MAX_SPEED_CHANGE_Y : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_SPEED_CHANGE_Y, (long)(val * 10.0d)); break;
case ACCEL_MAX_SPEED_CHANGE_Z : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MAX_SPEED_CHANGE_Z, (long)(val * 10.0d)); break;
case ACCEL_MIN_FEED_RATE : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MIN_FEED_RATE, (long)(val * 10.0d)); break;
case ACCEL_MIN_SEGMENT_TIME : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MIN_SEGMENT_TIME, (long)(val * 10000.0d)); break;
case ACCEL_MIN_TRAVEL_FEED_RATE : setUInt32EEPROM(JettyG3EEPROM.ACCEL_MIN_TRAVEL_FEED_RATE, (long)(val * 10.0d)); break;
case ACCEL_NOODLE_DIAMETER : setUInt32EEPROM(JettyG3EEPROM.ACCEL_NOODLE_DIAMETER, (long)(val * 100.0d)); break;
default : Base.logger.log(Level.WARNING, "setEEPROMParam(" + param + ", " + val + ") call failed"); break;
}
}
/**
 * Reset the firmware's EEPROM settings to the factory state.
 * With accelerated (Jetty) firmware, the RESET_TO_FACTORY command is sent
 * (filament counters and home offsets are left untouched); otherwise we
 * fall back to blanking the EEPROM via the superclass.
 * @throws RetryException if the command must be retried
 */
@Override
public void resetSettingsToFactory() throws RetryException {
	Base.logger.finer("resetting to factory in Makerbot4G");
	if (hasAcceleration()) {
		/// Send message to the firmware to restore EEPROM parameters to their default values
		/// Not reset are the filament counters and the home offsets
		PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.RESET_TO_FACTORY.getCode());
		pb.add8((byte) 0xFF); // reserved byte in payload
		runCommand(pb.getPacket()); // response is not used (was stored in a dead local)
	} else {
		super.resetSettingsToBlank();
	}
}
}
| rparkins999/ReplicatorG | src/replicatorg/drivers/gen3/Makerbot4GDriver.java |
1,491 | // Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
/*
* This is adapted from the kstemmer code base which is Copyright 2003, CIIR University of Massachusetts
* Amherst (http://ciir.cs.umass.edu) and Licensed under the terms of a modified old-style BSD license.
*/
package com.yahoo.language.simple.kstem;
/** A list of words used by Kstem
*/
class KStemData4 {
// Private constructor: this class is a static word-list holder and is never instantiated.
private KStemData4() {
}
static String[] data = {
"granular","granulate","granule","grape","grapefruit",
"grapeshot","grapevine","graph","graphic","graphical",
"graphically","graphite","graphology","grapnel","grapple",
"grasp","grasping","grass","grasshopper","grassland",
"grassy","grate","grateful","grater","gratification",
"gratify","gratifying","grating","gratis","gratitude",
"gratuitous","gratuity","grave","gravel","gravelly",
"gravestone","graveyard","gravitate","gravitation","gravity",
"gravure","gravy","gray","graybeard","grayish",
"graze","grease","greasepaint","greaseproof","greaser",
"greasy","great","greatcoat","greater","greatly",
"grebe","grecian","greed","greedy","green",
"greenback","greenery","greenfly","greengage","greengrocer",
"greenhorn","greenhouse","greenish","greenroom","greens",
"greenwood","greet","greeting","gregarious","gremlin",
"grenade","grenadier","grenadine","grew","grey",
"greybeard","greyhound","greyish","grid","griddle",
"gridiron","grief","grievance","grieve","grievous",
"griffin","grill","grim","grimace","grime",
"grimy","grin","grind","grinder","grindstone",
"gringo","grip","gripe","gripes","gripping",
"grisly","grist","gristle","grit","grits",
"grizzle","grizzled","groan","groat","groats",
"grocer","groceries","grocery","grog","groggy",
"groin","groom","groove","groover","groovy",
"grope","gropingly","gross","grotesque","grotto",
"grotty","grouch","ground","grounding","groundless",
"groundnut","grounds","groundsel","groundsheet","groundsman",
"groundwork","group","groupie","grouping","grouse",
"grove","grovel","grow","grower","growl",
"grown","growth","groyne","grub","grubby",
"grudge","grudging","gruel","grueling","gruelling",
"gruesome","gruff","grumble","grumbling","grumpy",
"grundyism","grunt","gryphon","guano","guarantee",
"guarantor","guaranty","guard","guarded","guardhouse",
"guardian","guardianship","guardrail","guardroom","guardsman",
"guava","gubernatorial","gudgeon","guerilla","guerrilla",
"guess","guesswork","guest","guesthouse","guestroom",
"guffaw","guidance","guide","guidelines","guild",
"guilder","guildhall","guile","guileless","guillemot",
"guillotine","guilt","guilty","guinea","guipure",
"guise","guitar","gulch","gulden","gulf",
"gull","gullet","gulley","gullible","gully",
"gulp","gum","gumbo","gumboil","gumboot",
"gumdrop","gummy","gumption","gun","gunboat",
"gundog","gunfire","gunge","gunman","gunmetal",
"gunnel","gunner","gunnery","gunnysack","gunpoint",
"gunpowder","gunrunner","gunshot","gunshy","gunsmith",
"gunwale","guppy","gurgle","guru","gush",
"gusher","gushing","gushy","gusset","gust",
"gustatory","gusto","gusty","gut","gutless",
"guts","gutsy","gutter","guttersnipe","guttural",
"guv","guvnor","guy","guzzle","gym",
"gymkhana","gymnasium","gymnast","gymnastic","gymnastics",
"gymslip","gynaecology","gynecology","gyp","gypsum",
"gypsy","gyrate","gyration","gyroscope","gyves",
"haberdasher","haberdashery","habiliment","habit","habitable",
"habitat","habitation","habitual","habituate","hacienda",
"hack","hackles","hackney","hackneyed","hacksaw",
"hackwork","had","haddock","hadji","haft",
"hag","haggard","haggis","haggle","hagiography",
"haiku","hail","hailstone","hailstorm","hair",
"hairbrush","haircut","hairdo","hairdresser","hairgrip",
"hairless","hairline","hairnet","hairpiece","hairpin",
"hairspring","hairy","hajji","hake","halberd",
"halcyon","hale","half","halfback","halfpence",
"halfpenny","halfpennyworth","halftone","halfway","halibut",
"halitosis","hall","halleluja","halliard","hallmark",
"hallo","hallow","hallstand","hallucinate","hallucination",
"hallucinatory","hallucinogenic","hallway","halma","halo",
"halt","halter","halterneck","halting","halve",
"halves","halyard","ham","hamadryad","hamburger",
"hamlet","hammer","hammock","hamper","hamster",
"hamstring","hand","handbag","handball","handbarrow",
"handbill","handbook","handbrake","handcart","handclap",
"handcuff","handcuffs","handful","handgun","handhold",
"handicap","handicraft","handiwork","handkerchief","handle",
"handlebars","handler","handloom","handmade","handmaiden",
"handout","handpick","handrail","handshake","handsome",
"handstand","handwork","handwriting","handwritten","handy",
"handyman","hang","hangar","hangdog","hanger",
"hanging","hangings","hangman","hangnail","hangout",
"hangover","hangup","hank","hanker","hankering",
"hankie","hanky","hansard","hansom","hap",
"haphazard","hapless","haply","happen","happening",
"happily","happiness","happy","harangue","harass",
"harassment","harbinger","harbor","harbour","hard",
"hardback","hardboard","hardbound","harden","hardheaded",
"hardihood","hardiness","hardly","hardness","hardship",
"hardtop","hardware","hardwearing","hardwood","hardy",
"hare","harebell","harebrained","harelip","harem",
"haricot","hark","harlequin","harlequinade","harlot",
"harm","harmless","harmonic","harmonica","harmonise",
"harmonium","harmonize","harmony","harness","harp",
"harpoon","harpsichord","harpy","harquebus","harridan",
"harrier","harrow","harrowing","harry","harsh",
"hart","hartal","hartebeest","harvest","harvester",
"has","hash","hashish","hasp","hassle",
"hassock","hast","haste","hasten","hasty",
"hat","hatband","hatch","hatchback","hatchery",
"hatchet","hatching","hatchway","hate","hateful",
"hath","hatless","hatpin","hatred","hatter",
"hauberk","haughty","haul","haulage","haulier",
"haulm","haunch","haunt","haunting","hautbois",
"hautboy","hauteur","havana","have","haven",
"haver","haversack","haves","havoc","haw",
"hawk","hawker","hawser","hawthorn","hay",
"haycock","hayfork","haymaker","haystack","haywire",
"hazard","hazardous","haze","hazel","hazy",
"head","headache","headband","headboard","headcheese",
"headdress","header","headfirst","headgear","headhunter",
"heading","headland","headless","headlight","headline",
"headlong","headman","headmaster","headphone","headpiece",
"headquarters","headrest","headroom","headset","headship",
"headshrinker","headstall","headstone","headstrong","headway",
"headwind","headword","heady","heal","health",
"healthful","healthy","heap","hear","hearer",
"hearing","hearken","hearsay","hearse","heart",
"heartache","heartbeat","heartbreak","heartbreaking","heartbroken",
"heartburn","hearten","heartening","heartfelt","hearth",
"hearthrug","heartily","heartless","heartrending","heartsease",
"heartsick","heartstrings","heartthrob","heartwarming","heartwood",
"hearty","heat","heated","heater","heath",
"heathen","heather","heating","heatstroke","heave",
"heaven","heavenly","heavenwards","heavy","heavyhearted",
"heavyweight","hebdomadal","hebraic","hebrew","hecatomb",
"heck","heckle","hectare","hectic","hector",
"hedge","hedgehog","hedgehop","hedgerow","hedonism",
"heed","heel","heelball","hefty","hegemony",
"hegira","heifer","height","heighten","heinous",
"heir","heiress","heirloom","hejira","held",
"helicopter","heliograph","heliotrope","heliport","helium",
"hell","hellcat","hellene","hellenic","hellenistic",
"hellish","hellishly","hello","helm","helmet",
"helmeted","helmsman","helot","help","helpful",
"helping","helpless","helpmate","helve","hem",
"hemisphere","hemline","hemlock","hemoglobin","hemophilia",
"hemophiliac","hemorrhage","hemorrhoid","hemp","hempen",
"hemstitch","hen","henbane","hence","henceforth",
"henchman","henna","hennaed","henpecked","hepatitis",
"heptagon","her","herald","heraldic","heraldry",
"herb","herbaceous","herbage","herbal","herbalist",
"herbivorous","herculean","herd","herdsman","here",
"hereabouts","hereafter","hereby","hereditament","hereditary",
"heredity","herein","hereinafter","hereof","heresy",
"heretic","hereto","heretofore","hereunder","hereupon",
"herewith","heritable","heritage","hermaphrodite","hermetic",
"hermit","hermitage","hernia","hero","heroic",
"heroics","heroin","heroism","heron","heronry",
"herpes","herr","herring","herringbone","hers",
"herself","hertz","hesitancy","hesitant","hesitate",
"hesitation","hesperus","hessian","heterodox","heterodoxy",
"heterogeneous","heterosexual","heuristic","heuristics","hew",
"hewer","hex","hexagon","hexagram","hexameter",
"hey","heyday","hiatus","hibernate","hibiscus",
"hiccough","hiccup","hick","hickory","hide",
"hideaway","hidebound","hideous","hiding","hie",
"hierarchy","hieroglyph","hieroglyphics","high","highball",
"highborn","highboy","highbrow","higher","highfalutin",
"highland","highlander","highlands","highlight","highly",
"highness","highpitched","highroad","highway","highwayman",
"hijack","hike","hilarious","hilarity","hill",
"hillbilly","hillock","hillside","hilly","hilt",
"him","himself","hind","hinder","hindmost",
"hindquarters","hindrance","hindsight","hindu","hinduism",
"hinge","hint","hinterland","hip","hipbath",
"hippie","hippodrome","hippopotamus","hippy","hipster",
"hire","hireling","hirsute","his","hiss",
"hist","histamine","histology","historian","historic",
"historical","history","histrionic","histrionics","hit",
"hitch","hitchhike","hither","hitherto","hive",
"hives","hms","hoard","hoarding","hoarfrost",
"hoarse","hoary","hoax","hob","hobble",
"hobbledehoy","hobby","hobbyhorse","hobgoblin","hobnail",
"hobnob","hobo","hock","hockey","hod",
"hodgepodge","hoe","hog","hoggish","hogmanay",
"hogshead","hogwash","hoist","hold","holdall",
"holder","holding","holdover","holdup","hole",
"holiday","holidaymaker","holiness","holler","hollow",
"holly","hollyhock","hollywood","holocaust","holograph",
"holstein","holster","holy","homage","homburg",
"home","homecoming","homegrown","homeland","homelike",
"homely","homemade","homeopath","homeopathy","homeric",
"homesick","homespun","homestead","hometown","homeward",
"homewards","homework","homey","homicidal","homicide",
"homiletic","homiletics","homily","homing","hominy",
"homoeopath","homoeopathy","homogeneous","homogenise","homogenize",
"homograph","homonym","homophone","homosexual","homy",
"hone","honest","honestly","honesty","honey",
"honeybee","honeycomb","honeycombed","honeydew","honeyed",
"honeymoon","honeysuckle","honk","honkie","honky",
"honor","honorable","honorarium","honorary","honorific",
"honors","honour","honourable","honours","hooch",
"hood","hooded","hoodlum","hoodoo","hoodwink",
"hooey","hoof","hook","hookah","hooked",
"hooker","hookey","hookup","hookworm","hooky",
"hooligan","hoop","hooray","hoot","hooter",
"hoover","hooves","hop","hope","hopeful",
"hopefully","hopeless","hopper","hopscotch","horde",
"horizon","horizontal","hormone","horn","hornbeam",
"hornbill","horned","hornet","hornpipe","horny",
"horology","horoscope","horrendous","horrible","horrid",
"horrific","horrify","horror","horrors","horse",
"horseback","horsebox","horseflesh","horsefly","horsehair",
"horselaugh","horseman","horsemanship","horsemeat","horseplay",
"horsepower","horseracing","horseradish","horseshit","horseshoe",
"horsewhip","horsewoman","horsy","hortative","horticulture",
"hosanna","hose","hosier","hosiery","hospice",
"hospitable","hospital","hospitalise","hospitality","hospitalize",
"host","hostage","hostel","hosteler","hosteller",
"hostelry","hostess","hostile","hostilities","hostility",
"hostler","hot","hotbed","hotchpotch","hotel",
"hotelier","hotfoot","hothead","hothouse","hotly",
"hotplate","hotpot","hottentot","hound","hour",
"hourglass","houri","hourly","house","houseboat",
"housebound","houseboy","housebreaker","housebroken","housecoat",
"housecraft","housedog","housefather","housefly","houseful",
"household","householder","housekeeper","housekeeping","housemaid",
"houseman","housemaster","housemother","houseroom","housetops",
"housewarming","housewife","housewifery","housework","housing",
"hove","hovel","hover","hovercraft","how",
"howdah","howdy","however","howitzer","howl",
"howler","howling","howsoever","hoyden","hrh",
"hub","hubbub","hubby","hubcap","hubris",
"huckaback","huckleberry","huckster","huddle","hue",
"huff","huffish","huffy","hug","huge",
"hugely","huguenot","huh","hula","hulk",
"hulking","hull","hullabaloo","hullo","hum",
"human","humane","humanise","humanism","humanitarian",
"humanitarianism","humanities","humanity","humanize","humankind",
"humanly","humble","humbug","humdinger","humdrum",
"humerus","humid","humidify","humidity","humidor",
"humiliate","humility","hummingbird","hummock","humor",
"humorist","humorous","humour","hump","humpback",
"humph","humus","hun","hunch","hunchback",
"hundred","hundredweight","hung","hunger","hungry",
"hunk","hunkers","hunt","hunter","hunting",
"huntress","huntsman","hurdle","hurl","hurling",
"hurray","hurricane","hurried","hurry","hurt",
"hurtful","hurtle","husband","husbandman","husbandry",
"hush","husk","husky","hussar","hussy",
"hustings","hustle","hustler","hut","hutch",
"hutment","huzza","huzzah","hyacinth","hyaena",
"hybrid","hybridise","hybridize","hydra","hydrangea",
"hydrant","hydrate","hydraulic","hydraulics","hydrocarbon",
"hydroelectric","hydrofoil","hydrogen","hydrophobia","hydroplane",
"hydroponics","hydrotherapy","hyena","hygiene","hygienic",
"hymen","hymeneal","hymn","hymnal","hyperbola",
"hyperbole","hyperbolic","hypercritical","hypermarket","hypersensitive",
"hyphen","hyphenate","hypnosis","hypnotise","hypnotism",
"hypnotist","hypnotize","hypo","hypochondria","hypochondriac",
"hypocrisy","hypocrite","hypodermic","hypotenuse","hypothermia",
"hypothesis","hypothetical","hysterectomy","hysteria","hysterical",
"hysterics","iamb","iberian","ibex","ibidem",
"ibis","icbm","ice","iceberg","icebound",
"icebox","icebreaker","icefall","icehouse","iceman",
"icicle","icing","icon","iconoclast","icy",
"idea","ideal","idealise","idealism","idealist",
"idealize","ideally","idem","identical","identification",
"identify","identikit","identity","ideogram","ideology",
"ides","idiocy","idiom","idiomatic","idiosyncrasy",
"idiot","idle","idol","idolater","idolatrous",
"idolatry","idolise","idolize","idyl","idyll",
"igloo","igneous","ignite","ignition","ignoble",
"ignominious","ignominy","ignoramus","ignorance","ignorant",
"ignore","iguana","ikon","ilex","ilk",
"ill","illegal","illegality","illegible","illegitimate",
"illiberal","illicit","illimitable","illiterate","illness",
"illogical","illuminate","illuminating","illumination","illuminations",
"illusion","illusionist","illusory","illustrate","illustration",
"illustrative","illustrator","illustrious","image","imagery",
"imaginable","imaginary","imagination","imaginative","imagine",
"imam","imbalance","imbecile","imbecility","imbed",
"imbibe","imbroglio","imbue","imitate","imitation",
"imitative","imitator","immaculate","immanence","immanent",
"immaterial","immature","immeasurable","immediacy","immediate",
"immediately","immemorial","immense","immensely","immensity",
"immerse","immersion","immigrant","immigrate","imminence",
"imminent","immobile","immobilise","immobilize","immoderate",
"immodest","immolate","immoral","immorality","immortal",
"immortalise","immortality","immortalize","immovable","immune",
"immunise","immunize","immure","immutable","imp",
"impact","impacted","impair","impala","impale",
"impalpable","impanel","impart","impartial","impassable",
"impasse","impassioned","impassive","impatience","impatient",
"impeach","impeccable","impecunious","impedance","impede",
"impediment","impedimenta","impel","impending","impenetrable",
"impenitent","imperative","imperceptible","imperfect","imperial",
"imperialism","imperialist","imperialistic","imperil","imperious",
"imperishable","impermanent","impermeable","impersonal","impersonate",
"impertinent","imperturbable","impervious","impetigo","impetuous",
"impetus","impiety","impinge","impious","impish",
"implacable","implant","implement","implicate","implication",
"implicit","implore","implosion","imply","impolite",
"impolitic","imponderable","import","importance","important",
"importation","importunate","importune","impose","imposing",
"imposition","impossible","impostor","imposture","impotent",
"impound","impoverish","impracticable","impractical","imprecation",
"impregnable","impregnate","impresario","impress","impression",
"impressionable","impressionism","impressionist","impressionistic","impressive",
"imprimatur","imprint","imprison","improbability","improbable",
"impromptu","improper","impropriety","improve","improvement",
"improvident","improvise","imprudent","impudent","impugn",
"impulse","impulsion","impulsive","impunity","impure",
"impurity","imputation","impute","inability","inaccessible",
"inaccurate","inaction","inactive","inadequacy","inadequate",
"inadmissible","inadvertent","inalienable","inamorata","inane",
"inanimate","inanition","inanity","inapplicable","inappropriate",
"inapt","inaptitude","inarticulate","inartistic","inattention",
"inattentive","inaudible","inaugural","inaugurate","inauspicious",
"inboard","inborn","inbound","inbred","inbreeding",
"inc","incalculable","incandescent","incantation","incapable",
"incapacitate","incapacity","incarcerate","incarnate","incarnation",
"incautious","incendiarism","incendiary","incense","incentive",
"inception","incertitude","incessant","incest","incestuous",
"inch","inchoate","incidence","incident","incidental",
"incidentally","incidentals","incinerate","incinerator","incipience",
"incipient","incise","incision","incisive","incisor",
"incite","incivility","inclement","inclination","incline",
"inclined","inclose","inclosure","include","included",
"including","inclusion","inclusive","incognito","incoherent",
"incombustible","income","incoming","incommensurable","incommensurate",
"incommode","incommodious","incommunicable","incommunicado","incommunicative",
"incomparable","incompatible","incompetence","incompetent","incomplete",
"incomprehensible","incomprehensibly","incomprehension","inconceivable","inconclusive",
"incongruity","incongruous","inconsequent","inconsequential","inconsiderable",
"inconsiderate","inconsistent","inconsolable","inconspicuous","inconstant",
"incontestable","incontinent","incontrovertible","inconvenience","inconvenient",
"incorporate","incorporated","incorporeal","incorrect","incorrigible",
"incorruptible","increase","increasingly","incredible","incredulity",
"incredulous","increment","incriminate","incrust","incrustation",
"incubate","incubation","incubator","incubus","inculcate",
"inculpate","incumbency","incumbent","incur","incurable",
"incurious","incursion","incurved","indebted","indecent",
"indecipherable","indecision","indecisive","indecorous","indecorum",
"indeed","indefatigable","indefensible","indefinable","indefinite",
"indefinitely","indelible","indelicate","indemnification","indemnify",
"indemnity","indent","indentation","indenture","independence",
"independent","indescribable","indestructible","indeterminable","indeterminate",
"index","indian","indicate","indication","indicative",
"indicator","indices","indict","indictable","indifferent",
"indigenous","indigent","indigestible","indigestion","indignant",
"indignation","indignity","indigo","indirect","indiscernible",
"indiscipline","indiscreet","indiscretion","indiscriminate","indispensable",
"indisposed","indisposition","indisputable","indissoluble","indistinct",
"indistinguishable","individual","individualise","individualism","individuality",
"individualize","individually","indivisible","indocile","indoctrinate",
"indolent","indomitable","indoor","indoors","indorse",
"indrawn","indubitable","induce","inducement","induct",
"induction","inductive","indue","indulge","indulgence",
"indulgent","industrial","industrialise","industrialism","industrialist",
"industrialize","industrious","industry","inebriate","inedible",
"ineducable","ineffable","ineffaceable","ineffective","ineffectual",
"inefficient","inelastic","inelegant","ineligible","ineluctable",
"inept","ineptitude","inequality","inequitable","inequity",
"ineradicable","inert","inertia","inescapable","inessential",
"inestimable","inevitable","inexact","inexactitude","inexcusable",
"inexhaustible","inexorable","inexpediency","inexpedient","inexpensive",
"inexperience","inexperienced","inexpert","inexpiable","inexplicable",
"inexplicably","inexpressible","inextinguishable","inextricable","infallible",
"infallibly","infamous","infamy","infancy","infant",
"infanticide","infantile","infantry","infantryman","infatuated",
"infatuation","infect","infection","infectious","infelicitous",
"infer","inference","inferential","inferior","infernal",
"inferno","infertile","infest","infidel","infidelity",
"infield","infighting","infiltrate","infiltration","infinite",
"infinitesimal","infinitive","infinitude","infinity","infirm",
"infirmary","infirmity","inflame","inflamed","inflammable",
"inflammation","inflammatory","inflatable","inflate","inflated",
"inflation","inflationary","inflect","inflection","inflexible",
"inflexion","inflict","infliction","inflow","influence",
"influential","influenza","influx","info","inform",
"informal","informant","information","informative","informed",
"informer","infra","infraction","infrared","infrastructure",
"infrequent","infringe","infuriate","infuse","infusion",
"ingathering","ingenious","ingenuity","ingenuous","ingest",
"inglenook","inglorious","ingoing","ingot","ingraft",
"ingrained","ingratiate","ingratiating","ingratitude","ingredient",
"ingress","ingrown","inhabit","inhabitant","inhale",
"inhaler","inharmonious","inhere","inherent","inherently",
"inherit","inheritance","inhibit","inhibited","inhibition",
"inhospitable","inhuman","inhumane","inhumanity","inimical",
"inimitable","iniquitous","iniquity","initial","initially",
"initiate","initiation","initiative","inject","injection",
"injudicious","injunction","injure","injurious","injury",
"injustice","ink","inkbottle","inkling","inkpad",
"inkstand","inkwell","inky","inlaid","inland",
"inlay","inlet","inmate","inmost","inn",
"innards","innate","inner","inning","innings",
"innkeeper","innocent","innocuous","innovate","innovation",
"innuendo","innumerable","inoculate","inoffensive","inoperable",
"inoperative","inopportune","inordinate","inorganic","input",
"inquest","inquietude","inquire","inquiring","inquiry",
"inquisition","inquisitive","inquisitor","inquisitorial","inroad",
"inrush","insalubrious","insane","insanitary","insanity",
"insatiable","insatiate","inscribe","inscription","inscrutable",
"insect","insecticide","insectivore","insectivorous","insecure",
"inseminate","insemination","insensate","insensibility","insensible",
"insensitive","inseparable","insert","insertion","inset",
"inshore","inside","insider","insidious","insight",
"insignia","insignificant","insincere","insinuate","insinuation",
"insipid","insist","insistence","insistency","insistent",
"insole","insolent","insoluble","insolvable","insolvent",
"insomnia","insomniac","insouciance","inspect","inspection",
"inspector","inspectorate","inspectorship","inspiration","inspire",
"inspired","instability","install","installation","installment",
"instalment","instance","instant","instantaneous","instantly",
"instead","instep","instigate","instigation","instil",
"instill","instinct","instinctive","institute","institution",
"instruct","instruction","instructive","instructor","instructress",
"instrument","instrumental","instrumentalist","instrumentality","instrumentation",
"insubordinate","insubstantial","insufferable","insufficiency","insufficient",
"insular","insularity","insulate","insulation","insulator",
"insulin","insult","insuperable","insupportable","insurance",
"insure","insured","insurer","insurgent","insurmountable",
"insurrection","intact","intaglio","intake","intangible",
"integer","integral","integrate","integrated","integrity",
"integument","intellect","intellectual","intelligence","intelligent",
"intelligentsia","intelligible","intemperate","intend","intended",
"intense","intensifier","intensify","intensity","intensive",
"intent","intention","intentional","intentions","inter",
"interact","interaction","interbreed","intercalary","intercalate",
"intercede","intercept","interceptor","intercession","interchange",
"interchangeable","intercity","intercollegiate","intercom","intercommunicate",
"intercommunion","intercontinental","intercourse","interdenominational","interdependent",
"interdict","interest","interested","interesting","interests",
"interface","interfere","interference","interim","interior",
"interject","interjection","interlace","interlard","interleave",
"interline","interlinear","interlink","interlock","interlocutor",
"interloper","interlude","intermarriage","intermarry","intermediary",
"intermediate","interment","intermezzo","interminable","intermingle",
"intermission","intermittent","intern","internal","internalise",
"internalize","international","internationale","internationalise","internationalism",
"internationalize","interne","internecine","internee","internment",
"interpellate","interpenetrate","interpersonal","interplanetary","interplay",
"interpol","interpolate","interpolation","interpose","interposition",
"interpret","interpretation","interpretative","interpreter","interracial",
"interregnum","interrelate","interrelation","interrogate","interrogative",
"interrogatory","interrupt","intersect","intersection","intersperse",
"interstate","interstellar","interstice","intertribal","intertwine",
"interurban","interval","intervene","intervention","interview",
"interweave","intestate","intestinal","intestine","intimacy",
"intimate","intimidate","intimidation","into","intolerable",
"intolerant","intonation","intone","intoxicant","intoxicate",
"intractable","intramural","intransigent","intransitive","intravenous",
"intrench","intrepid","intricacy","intricate","intrigue",
"intrinsic","intro","introduce","introduction","introductory",
"introit","introspection","introspective","introvert","introverted",
"intrude","intruder","intrusion","intrusive","intrust",
"intuit","intuition","intuitive","intumescence","inundate",
"inundation","inure","invade","invalid","invalidate",
"invalidism","invaluable","invariable","invasion","invective",
"inveigh","inveigle","invent","invention","inventive",
"inventor","inventory","inverse","inversion","invert",
"invertebrate","invest","investigate","investiture","investment",
"inveterate","invidious","invigilate","invigorate","invincible",
"inviolable","inviolate","invisible","invitation","invite",
"inviting","invocation","invoice","invoke","involuntary",
"involve","involved","invulnerable","inward","inwardness",
"inwards","inwrought","iodin","iodine","iodise",
"iodize","ion","ionic","ionise","ionize",
"ionosphere","iota","iou","ipa","ira",
"irascible","irate","ire","iridescent","iridium",
"irishman","irk","irksome","iron","ironclad",
"ironic","ironically","ironing","ironmonger","ironmongery",
"ironmould","irons","ironstone","ironware","ironwork",
"ironworks","irony","irradiate","irrational","irreconcilable",
"irrecoverable","irredeemable","irreducible","irrefutable","irregular",
"irregularity","irrelevance","irrelevant","irreligious","irremediable",
"irremovable","irreparable","irreplaceable","irrepressible","irreproachable",
"irresistible","irresolute","irresponsible","irretrievable","irreverent",
"irreversible","irrevocable","irrigate","irritable","irritant",
"irritate","irritation","irruption","isinglass","islam",
"island","islander","isle","islet","ism",
"isobar","isolate","isolated","isolation","isolationism",
"isotherm","isotope","israelite","issue","isthmus",
"ita","italic","italicise","italicize","italics",
"itch","itchy","item","itemise","itemize",
"iterate","itinerant","itinerary","itn","its",
"itself","itv","iud","ivied","ivory",
"ivy","jab","jabber","jack","jackal",
"jackanapes","jackaroo","jackass","jackboot","jackdaw",
"jackeroo","jacket","jackpot","jackrabbit","jacobean",
"jacobite","jade","jaded","jaffa","jag",
"jagged","jaguar","jail","jailbird","jailbreak",
"jailer","jailor","jalopy","jam","jamb",
"jamboree","jammy","jangle","janissary","janitor",
"january","japan","jape","japonica","jar",
"jargon","jasmine","jasper","jaundice","jaundiced",
"jaunt","jaunty","javelin","jaw","jawbone",
"jawbreaker","jaws","jay","jaywalk","jazz",
"jazzy","jealous","jealousy","jeans","jeep",
"jeer","jehovah","jejune","jell","jellied",
"jello","jelly","jellyfish","jemmy","jenny",
"jeopardise","jeopardize","jeopardy","jerboa","jeremiad",
"jerk","jerkin","jerky","jeroboam","jerry",
"jersey","jest","jester","jesting","jesuit",
"jesuitical","jet","jetsam","jettison","jetty",
"jew","jewel","jeweled","jeweler","jewelled",
"jeweller","jewellery","jewelry","jewess","jewish",
"jezebel","jib","jibe","jiffy","jig",
"jigger","jiggered","jiggle","jigsaw","jihad",
"jilt","jiminy","jimjams","jimmy","jingle",
"jingo","jingoism","jinks","jinn","jinrikisha",
"jinx","jitney","jitterbug","jitters","jiujitsu",
"jive","jnr","job","jobber","jobbery",
"jobbing","jobless","jockey","jockstrap","jocose",
"jocular","jocund","jodhpurs","jog","joggle",
"john","johnny","join","joiner","joinery",
"joint","joist","joke","joker","jollification",
"jollity","jolly","jolt","jolty","jonah",
"jonquil","josh","jostle","jot","jotter",
"jotting","joule","journal","journalese","journalism",
"journalist","journey","journeyman","joust","jove",
"jovial","jowl","joy","joyful","joyless",
"joyous","joyride","joystick","jubilant","jubilation",
"jubilee","judaic","judaism","judder","judge",
"judgement","judgment","judicature","judicial","judiciary",
"judicious","judo","jug","juggernaut","juggle",
"juice","juicy","jujitsu","juju","jujube",
"jukebox","julep","july","jumble","jumbo",
"jump","jumper","jumps","jumpy","junction",
"juncture","june","jungle","junior","juniper",
"junk","junket","junketing","junkie","junky",
"junoesque","junta","jupiter","juridical","jurisdiction",
"jurisprudence","jurist","juror","jury","juryman",
"just","justice","justifiable","justification","justified",
"justify","jut","jute","juvenile","juxtapose",
"juxtaposition","kaffir","kafir","kaftan","kail",
"kaiser","kale","kaleidoscope","kaleidoscopic","kalends",
"kampong","kangaroo","kaolin","kapok","kappa",
"kaput","karat","karate","karma","katydid",
"kayak","kazoo","kebab","kebob","kedgeree",
"keel","keelhaul","keen","keep","keeper",
"keeping","keeps","keepsake","keg","kelp",
"kelvin","ken","kennel","kennels","kepi",
"kept","kerb","kerchief","kerfuffle","kernel",
"kerosene","kerosine","kersey","kestrel","ketch",
"ketchup","kettle","kettledrum","key","keyboard",
"keyhole","keyless","keynote","keypunch","keystone",
"khaki","khalif","khalifate","khan","kibbutz",
"kibosh","kick","kickback","kicker","kickoff",
"kicks","kid","kiddie","kiddy","kidnap",
"kidney","kike","kill","killer","killing",
"killjoy","kiln","kilo","kilogram","kilogramme",
"kilohertz","kiloliter","kilolitre","kilometer","kilometre",
"kilowatt","kilt","kimono","kin","kind",
"kindergarten","kindle","kindling","kindly","kindness",
"kindred","kine","kinetic","kinetics","kinfolk",
"king","kingcup","kingdom","kingfisher","kingly",
"kingmaker","kingpin","kings","kingship","kink",
"kinky","kinsfolk","kinship","kinsman","kiosk",
"kip","kipper","kirk","kirsch","kirtle",
"kismet","kiss","kisser","kit","kitchen",
"kitchenette","kite","kitsch","kitten","kittenish",
"kittiwake","kitty","kiwi","klaxon","kleenex",
"kleptomania","kleptomaniac","knack","knacker","knackered",
"knapsack","knave","knavery","knead","knee",
"kneecap","kneel","knell","knew","knickerbockers",
"knickers","knife","knight","knighthood","knightly",
"knit","knitter","knitting","knitwear","knives",
"knob","knobbly","knobkerrie","knock","knockabout",
"knockdown","knocker","knockers","knockout","knoll",
"knot","knothole","knotty","knout","know",
"knowing","knowingly","knowledge","knowledgeable","known",
"knuckle","koala","kohl","kohlrabi","kookaburra",
"kopeck","kopek","kopje","koppie","koran",
"kosher","kowtow","kraal","kremlin","kris",
"krona","krone","kudos","kukri","kumis",
"kumquat","kuomintang","kurus","kvass","kwashiorkor",
"kwela","laager","lab","label","labial",
"labor","laboratory","laborer","laborious","labour",
"labourer","labourite","labrador","laburnum","labyrinth",
"lace","lacerate","laceration","lachrymal","lachrymose",
"lack","lackadaisical","lackey","lacking","lackluster",
"lacklustre","laconic","lacquer","lacrosse","lactation",
"lactic","lactose","lacuna","lacy","lad",
"ladder","laddie","laddy","laden","ladies",
"lading","ladle","lady","ladybird","ladylike",
"ladyship","lag","lager","laggard","lagging",
"lagoon","laid","lain","lair","laird",
"laity","lake","lam","lama","lamaism",
"lamasery","lamb","lambaste","lambent","lambkin",
"lamblike","lambskin","lame","lament","lamentable",
"lamentation","laminate","lamming","lamp","lampoon",
"lamppost","lamprey","lampshade","lance","lancer",
"lancers","lancet","land","landau","landed",
"landfall","landing","landlady","landlocked","landlord",
"landlubber","landmark","landmine","lands","landscape",
"landslide","landslip","landward","landwards","lane",
"language","languid","languish","languor","lank",
"lanky","lanolin","lantern","lanternslide","lanyard",
"lap","lapdog","lapel","lapidary","lapse",
"lapsed","lapwing","larboard","larceny","larch",
"lard","larder","large","largely","largess",
"largesse","largo","lariat","lark","larkspur",
"larrup","larva","laryngeal","laryngitis","laryngoscope",
"larynx","lasagna","lascivious","laser","lash",
"lashing","lashings","lass","lasso","last",
"lasting","lastly","lat","latch","latchkey",
"late","latecomer","lately","latent","lateral",
"latest","latex","lath","lathe","lather",
"latin","latinise","latinize","latitude","latitudes",
"latitudinal","latitudinarian","latrine","latter","latterly",
"lattice","laud","laudable","laudanum","laudatory",
"laugh","laughable","laughingstock","laughter","launch",
"launder","launderette","laundress","laundry","laureate",
"laurel","laurels","lava","lavatory","lave",
"lavender","lavish","law","lawful","lawless",
"lawn","lawsuit","lawyer","lax","laxative",
"laxity","lay","layabout","layer","layette",
"layman","layout","laze","lazy","lbw",
"lcm","lea","leach","lead","leaden",
"leader","leadership","leading","leads","leaf",
"leafage","leafed","leaflet","leafy","league",
"leak","leakage","leaky","lean","leaning",
"leap","leapfrog","learn","learned","learner",
"learning","lease","leasehold","leash","least",
"leastways","leather","leatherette","leathery","leave",
"leaved","leaven","leavening","leaves","leavings",
"lech","lecher","lecherous","lechery","lectern",
"lecture","lecturer","lectureship","led","ledge",
"ledger","lee","leech","leek","leer",
"leery","lees","leeward","leeway","left",
"leftist","leftovers","leftward","leftwards","leg",
"legacy","legal","legalise","legality","legalize",
"legate","legatee","legation","legato","legend",
"legendary","leger","legerdemain","legged","leggings",
"leggy","legible","legion","legionary","legislate",
"legislation","legislative","legislator","legislature","legit",
"legitimate","legitimatise","legitimatize","legroom","legume",
"leguminous","lei","leisure","leisured","leisurely",
"leitmotif","leitmotive","lemming","lemon","lemonade",
"lemur","lend","length","lengthen","lengthways",
"lengthy","lenience","lenient","lenity","lens",
"lent","lentil","lento","leo","leonine",
"leopard","leotard","leper","leprechaun","leprosy",
"lesbian","lesion","less","lessee","lessen",
"lesser","lesson","lessor","lest","let",
"letdown","lethal","lethargy","letraset","letter",
"letterbox","lettered","letterhead","lettering","letterpress",
"letters","letting","lettuce","letup","leucocyte",
"leucotomy","leukaemia","leukemia","leukocyte","levee",
"level","leveler","leveller","lever","leverage",
"leveret","leviathan","levitate","levity","levodopa",
"levy","lewd","lexical","lexicographer","lexicography",
"lexicon","lexis","liability","liable","liaise",
"liaison","liana","liar","lib","libation",
"libel","libellous","libelous","liberal","liberalise",
"liberalism","liberality","liberalize","liberally","liberate",
"liberated","liberation","libertarian","liberties","libertine",
"liberty","libidinous","libido","libra","librarian",
"library","librettist","libretto","lice","licence",
"licenced","license","licensed","licensee","licentiate",
"licentious","lichen","licit","lick","licking",
"licorice","lid","lido","lie","lieder",
"lief","liege","lien","lieu","lieutenant",
"life","lifeblood","lifeboat","lifeguard","lifeless",
"lifelike","lifeline","lifelong","lifer","lifetime",
"lift","liftboy","ligament","ligature","light",
"lighten","lighter","lighterage","lighthouse","lighting",
"lightly","lightness","lightning","lights","lightship",
"lightweight","ligneous","lignite","likable","like",
"likeable","likelihood","likely","liken","likeness",
"likes","likewise","liking","lilac","lilliputian",
"lilo","lilt","lily","limb","limber",
"limbo","lime","limeade","limejuice","limekiln",
"limelight","limerick","limestone","limey","limit",
"limitation","limited","limiting","limitless","limn",
"limousine","limp","limpet","limpid","limy",
"linchpin","linctus","linden","line","lineage",
"lineal","lineament","linear","lineman","linen",
"lineout","liner","linertrain","lines","lineshooter",
"linesman","lineup","ling","linger","lingerie",
"lingering","lingo","lingual","linguist","linguistic",
"linguistics","liniment","lining","link","linkage",
"linkman","links","linkup","linnet","linocut",
"linoleum","linotype","linseed","lint","lintel",
"lion","lionize","lip","lipid","lipstick",
"liquefaction","liquefy","liquescent","liqueur","liquid",
"liquidate","liquidation","liquidator","liquidity","liquidize",
"liquidizer","liquor","liquorice","lira","lisle",
"lisp","lissom","lissome","list","listen",
"listenable","listener","listless","lists","lit",
"litany","litchi","liter","literacy","literal",
"literally","literary","literate","literati","literature",
"lithe","lithium","lithograph","lithographic","lithography",
"litigant","litigate","litigation","litigious","litmus",
"litotes","litre","litter","litterateur","litterbin",
"litterlout","little","littoral","liturgical","liturgy",
"livable","live","liveable","livelihood","livelong",
"lively","liven","liver","liveried","liverish",
"livery","liveryman","lives","livestock","livid",
"living","lizard","llama","load","loaded",
"loadstar","loadstone","loaf","loafsugar","loam",
"loan","loanword","loath","loathe","loathing",
"loathsome","loaves","lob","lobby","lobed",
"lobotomy","lobster","lobsterpot","local","locale",
"localise","localism","locality","localize","locally",
"locate","located","location","loch","loci",
};
}
| vespa-engine/vespa | linguistics/src/main/java/com/yahoo/language/simple/kstem/KStemData4.java |
1,492 | // © 2016 and later: Unicode, Inc. and others.
// License & terms of use: http://www.unicode.org/copyright.html
/*
*******************************************************************************
* Copyright (C) 1996-2016, International Business Machines Corporation and *
* others. All Rights Reserved. *
*******************************************************************************
*/
package com.ibm.icu.util;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.Date;
import java.util.Locale;
import java.util.function.IntConsumer;
import com.ibm.icu.impl.CalendarAstronomer;
import com.ibm.icu.impl.CalendarCache;
import com.ibm.icu.impl.CalendarUtil;
import com.ibm.icu.util.ULocale.Category;
/**
* <code>IslamicCalendar</code> is a subclass of <code>Calendar</code>
 * that implements the Islamic civil and religious calendars. It
* is used as the civil calendar in most of the Arab world and the
* liturgical calendar of the Islamic faith worldwide. This calendar
* is also known as the "Hijri" calendar, since it starts at the time
* of Mohammed's emigration (or "hijra") to Medinah on Thursday,
* July 15, 622 AD (Julian).
* <p>
* The Islamic calendar is strictly lunar, and thus an Islamic year of twelve
* lunar months does not correspond to the solar year used by most other
* calendar systems, including the Gregorian. An Islamic year is, on average,
* about 354 days long, so each successive Islamic year starts about 11 days
* earlier in the corresponding Gregorian year.
* <p>
* Each month of the calendar starts when the new moon's crescent is visible
* at sunset. However, in order to keep the time fields in this class
* synchronized with those of the other calendars and with local clock time,
* we treat days and months as beginning at midnight,
* roughly 6 hours after the corresponding sunset.
* <p>
* There are three main variants of the Islamic calendar in existence. The first
* is the <em>civil</em> calendar, which uses a fixed cycle of alternating 29-
* and 30-day months, with a leap day added to the last month of 11 out of
* every 30 years. This calendar is easily calculated and thus predictable in
* advance, so it is used as the civil calendar in a number of Arab countries.
* This is the default behavior of a newly-created <code>IslamicCalendar</code>
* object.
* <p>
* The Islamic <em>religious</em> calendar and Saudi Arabia's <em>Umm al-Qura</em>
* calendar, however, are based on the <em>observation</em> of the crescent moon.
* It is thus affected by the position at which the
* observations are made, seasonal variations in the time of sunset, the
* eccentricities of the moon's orbit, and even the weather at the observation
* site. This makes it impossible to calculate in advance, and it causes the
* start of a month in the religious calendar to differ from the civil calendar
* by up to three days.
* <p>
* Using astronomical calculations for the position of the sun and moon, the
* moon's illumination, and other factors, it is possible to determine the start
* of a lunar month with a fairly high degree of certainty. However, these
* calculations are extremely complicated and thus slow, so most algorithms,
* including the one used here, are only approximations of the true astronomical
* calculations. At present, the approximations used in this class are fairly
* simplistic; they will be improved in later versions of the code.
* <p>
* Like the Islamic religious calendar, <em>Umm al-Qura</em> is also based
* on the sighting method of the crescent moon but is standardized by Saudi Arabia.
* <p>
* The {@link #setCalculationType(CalculationType) setCalculationType} method determines
* which approach is used to determine the start of a month. By default, the
* fixed-cycle <em>civil</em> calendar is used. However, if <code>setCalculationType(ISLAMIC)</code>
* is called, an approximation of the true lunar calendar will be used.
* Similarly, if <code>setCalculationType(ISLAMIC_UMALQURA)</code> is called, an approximation
* of the Umm al-Qura lunar calendar will be used.
* <p>
* This class should not be subclassed.</p>
* <p>
* IslamicCalendar usually should be instantiated using
* {@link com.ibm.icu.util.Calendar#getInstance(ULocale)} passing in a <code>ULocale</code>
* with the tag <code>"@calendar=islamic"</code> or <code>"@calendar=islamic-civil"</code>
* or <code>"@calendar=islamic-umalqura"</code>.</p>
*
* @see com.ibm.icu.util.GregorianCalendar
* @see com.ibm.icu.util.Calendar
*
* @author Laura Werner
* @author Alan Liu
* @stable ICU 2.8
*/
public class IslamicCalendar extends Calendar {
// Serialization version (generated with jdk1.4.2 serialver); must never
// change, or previously serialized instances will fail to deserialize.
// jdk1.4.2 serialver
private static final long serialVersionUID = -6253365474073869325L;
//-------------------------------------------------------------------------
// Constants...
//-------------------------------------------------------------------------
/**
 * Constant for Muharram, the 1st month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int MUHARRAM = 0;
/**
 * Constant for Safar, the 2nd month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int SAFAR = 1;
/**
 * Constant for Rabi' al-awwal (or Rabi' I), the 3rd month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int RABI_1 = 2;
/**
 * Constant for Rabi' al-thani or (Rabi' II), the 4th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int RABI_2 = 3;
/**
 * Constant for Jumada al-awwal or (Jumada I), the 5th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int JUMADA_1 = 4;
/**
 * Constant for Jumada al-thani or (Jumada II), the 6th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int JUMADA_2 = 5;
/**
 * Constant for Rajab, the 7th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int RAJAB = 6;
/**
 * Constant for Sha'ban, the 8th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int SHABAN = 7;
/**
 * Constant for Ramadan, the 9th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int RAMADAN = 8;
/**
 * Constant for Shawwal, the 10th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int SHAWWAL = 9;
/**
 * Constant for Dhu al-Qi'dah, the 11th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int DHU_AL_QIDAH = 10;
/**
 * Constant for Dhu al-Hijjah, the 12th month of the Islamic year.
 * @stable ICU 2.8
 */
public static final int DHU_AL_HIJJAH = 11;
// Time of the Hijra epoch in milliseconds relative to the Java epoch
// (1970-01-01); used by the astronomical month-start search.
private static final long HIJRA_MILLIS = -42521587200000L; // 7/16/622 AD 00:00
/**
 * Friday EPOCH: Julian day number of the civil (Friday) epoch, used by the
 * civil, Umm al-Qura, and astronomical-simulation calculation types.
 */
private static final long CIVIL_EPOCH = 1948440; // CE 622 July 16 Friday (Julian calendar) / CE 622 July 19 (Gregorian calendar)
//
/**
 * Thursday EPOCH: Julian day number of the astronomical (Thursday) epoch,
 * one day before the civil epoch; used by the tabular ISLAMIC_TBLA type.
 */
private static final long ASTRONOMICAL_EPOCH = 1948439; // CE 622 July 15 Thursday (Julian calendar)
//-------------------------------------------------------------------------
// Constructors...
//-------------------------------------------------------------------------
/**
 * Constructs a default <code>IslamicCalendar</code> using the current time
 * in the default time zone with the default <code>FORMAT</code> locale.
 * @see Category#FORMAT
 * @stable ICU 2.8
 */
public IslamicCalendar()
{
    // Delegate to the (TimeZone, ULocale) constructor with process defaults.
    this(TimeZone.getDefault(), ULocale.getDefault(Category.FORMAT));
}
/**
 * Constructs an <code>IslamicCalendar</code> based on the current time
 * in the given time zone with the default <code>FORMAT</code> locale.
 * @param zone the given time zone.
 * @see Category#FORMAT
 * @stable ICU 2.8
 */
public IslamicCalendar(TimeZone zone)
{
    // Delegate, supplying the default FORMAT locale.
    this(zone, ULocale.getDefault(Category.FORMAT));
}
/**
 * Constructs an <code>IslamicCalendar</code> based on the current time
 * in the default time zone with the given locale.
 *
 * @param aLocale the given locale.
 * @stable ICU 2.8
 */
public IslamicCalendar(Locale aLocale)
{
    // The time zone comes from the locale when it specifies one,
    // otherwise from the process default.
    this(TimeZone.forLocaleOrDefault(aLocale), aLocale);
}
/**
 * Constructs an <code>IslamicCalendar</code> based on the current time
 * in the default time zone with the given locale.
 *
 * @param locale the given ulocale.
 * @stable ICU 3.2
 */
public IslamicCalendar(ULocale locale)
{
    // The time zone comes from the locale when it specifies one,
    // otherwise from the process default.
    this(TimeZone.forULocaleOrDefault(locale), locale);
}
/**
 * Constructs an <code>IslamicCalendar</code> based on the current time
 * in the given time zone with the given locale.
 *
 * @param zone the given time zone.
 * @param aLocale the given locale.
 * @stable ICU 2.8
 */
public IslamicCalendar(TimeZone zone, Locale aLocale)
{
    // Convert to ULocale and delegate to the primary constructor.
    this(zone, ULocale.forLocale(aLocale));
}
/**
 * Constructs an <code>IslamicCalendar</code> based on the current time
 * in the given time zone with the given locale.
 * This is the primary constructor; all other time-zone/locale constructors
 * delegate here.
 *
 * @param zone the given time zone.
 * @param locale the given ulocale.
 * @stable ICU 3.2
 */
public IslamicCalendar(TimeZone zone, ULocale locale)
{
    super(zone, locale);
    // Select the calculation type (civil, umalqura, ...) from the locale's
    // calendar keyword before any field computation takes place.
    setCalcTypeForLocale(locale);
    setTimeInMillis(System.currentTimeMillis());
}
/**
 * Constructs an <code>IslamicCalendar</code> with the given date set
 * in the default time zone with the default <code>FORMAT</code> locale.
 *
 * @param date The date to which the new calendar is set.
 * @see Category#FORMAT
 * @stable ICU 2.8
 */
public IslamicCalendar(Date date) {
    super(TimeZone.getDefault(), ULocale.getDefault(Category.FORMAT));
    // NOTE(review): unlike the time-zone/locale constructors, this path does
    // not call setCalcTypeForLocale, so the default calculation type applies.
    this.setTime(date);
}
/**
 * Constructs an <code>IslamicCalendar</code> with the given date set
 * in the default time zone with the default <code>FORMAT</code> locale.
 *
 * @param year the value used to set the {@link #YEAR YEAR} time field in the calendar.
 * @param month the value used to set the {@link #MONTH MONTH} time field in the calendar.
 *              Note that the month value is 0-based. e.g., 0 for Muharram.
 * @param date the value used to set the {@link #DATE DATE} time field in the calendar.
 * @see Category#FORMAT
 * @stable ICU 2.8
 */
public IslamicCalendar(int year, int month, int date)
{
    super(TimeZone.getDefault(), ULocale.getDefault(Category.FORMAT));
    // Set the date fields; the time-of-day fields keep their defaults and
    // the millisecond time is resolved lazily by the Calendar framework.
    this.set(Calendar.YEAR, year);
    this.set(Calendar.MONTH, month);
    this.set(Calendar.DATE, date);
}
/**
 * Constructs an <code>IslamicCalendar</code> with the given date
 * and time set for the default time zone with the default <code>FORMAT</code> locale.
 *
 * @param year the value used to set the {@link #YEAR YEAR} time field in the calendar.
 * @param month the value used to set the {@link #MONTH MONTH} time field in the calendar.
 *              Note that the month value is 0-based. e.g., 0 for Muharram.
 * @param date the value used to set the {@link #DATE DATE} time field in the calendar.
 * @param hour the value used to set the {@link #HOUR_OF_DAY HOUR_OF_DAY} time field
 *              in the calendar.
 * @param minute the value used to set the {@link #MINUTE MINUTE} time field
 *              in the calendar.
 * @param second the value used to set the {@link #SECOND SECOND} time field
 *              in the calendar.
 * @see Category#FORMAT
 * @stable ICU 2.8
 */
public IslamicCalendar(int year, int month, int date, int hour,
                       int minute, int second)
{
    super(TimeZone.getDefault(), ULocale.getDefault(Category.FORMAT));
    // Set both the date and the time-of-day fields; resolution to a
    // millisecond instant is deferred to the Calendar framework.
    this.set(Calendar.YEAR, year);
    this.set(Calendar.MONTH, month);
    this.set(Calendar.DATE, date);
    this.set(Calendar.HOUR_OF_DAY, hour);
    this.set(Calendar.MINUTE, minute);
    this.set(Calendar.SECOND, second);
}
// Private interface for different Islamic calendar algorithms. Each
// CalculationType is backed by one stateless implementation of this
// interface; all day counts below are relative to the algorithm's epoch().
private interface Algorithm {
    /**
     * Returns <code>true</code> if this object is using the fixed-cycle civil
     * calendar, or <code>false</code> if using the religious, astronomical
     * calendar.
     */
    public boolean isCivil();
    /**
     * Return the calculation type this algorithm implements.
     */
    public CalculationType getType();
    /**
     * Return the epoch used by this algorithm, as a Julian day number.
     */
    public long epoch();
    /**
     * Return the day # on which the given year starts.  Days are counted
     * from the Hijri epoch, origin 0.
     *
     * @param year  The hijri year
     */
    public long yearStart(int year);
    /**
     * Return the day # on which the given month starts.  Days are counted
     * from the Hijri epoch, origin 0.
     *
     * @param year  The hijri year
     * @param month The hijri month, 0-based
     */
    public long monthStart(int year, int month);
    /**
     * Return the length (in days) of the given month.
     *
     * @param year  The hijri year
     * @param month The hijri month, 0-based
     */
    public int monthLength(int year, int month);
    /**
     * Return the length (in days) of the given year.
     *
     * @param year  The hijri year
     */
    public int yearLength(int year);
    /**
     * Compute the year, month, dayOfMonth, and dayOfYear of the given julian days
     * and current time, and feed the calculated results to the consumers.
     * @param julianDays the Julian day number to convert.
     * @param current the time in milliseconds (used only by algorithms that
     *        consult the moon's current age).
     * @param yearConsumer consumer to take the year result.
     * @param monthConsumer consumer to take the month result.
     * @param dayOfMonthConsumer consumer to take the dayOfMonth result.
     * @param dayOfYearConsumer consumer to take the dayOfYear result.
     */
    public void compute(long julianDays, long current,
        IntConsumer yearConsumer, IntConsumer monthConsumer,
        IntConsumer dayOfMonthConsumer, IntConsumer dayOfYearConsumer);
};
/**
 * Algorithm which implements the rules for CalculationType.ISLAMIC: an
 * astronomical approximation of the religious calendar. Month boundaries
 * come from trueMonthStart(), which searches for the computed visibility
 * of the new crescent moon (results are cached file-wide in 'cache').
 */
static private class IslamicAlgorithm implements Algorithm {
    public boolean isCivil() {
        return false;
    }
    public CalculationType getType() {
        return CalculationType.ISLAMIC;
    }
    public long epoch() {
        // Shares the Friday (civil) epoch; only month boundaries differ.
        return CIVIL_EPOCH;
    }
    public long yearStart(int year) {
        // A year begins where its first month begins.
        return monthStart(year, 0);
    }
    public long monthStart(int year, int month) {
        // Normalize year/month in case month is outside the normal bounds, which may occur
        // in the case of an add operation.
        // trueMonthStart takes the number of months elapsed since the epoch
        // (year 1, month 0 maps to month 0).
        return trueMonthStart(12*((year + month / 12)-1) + (month % 12));
    }
    public int monthLength(int year, int month) {
        // Length is the gap between two consecutive computed month starts.
        month += 12*(year-1);
        return (int)(trueMonthStart(month+1) - trueMonthStart(month));
    }
    public int yearLength(int year) {
        // Span of the 12 lunar months that make up the year.
        int month = 12*(year-1);
        return (int)(trueMonthStart(month + 12) - trueMonthStart(month));
    }
    public void compute(long julianDays, long current,
            IntConsumer yearConsumer, IntConsumer monthConsumer,
            IntConsumer dayOfMonthConsumer, IntConsumer dayOfYearConsumer) {
        // Days elapsed since the Hijri epoch (origin 0).
        long days = julianDays - epoch();
        // Guess at the number of elapsed full months since the epoch
        int month = (int)Math.floor(days / CalendarAstronomer.SYNODIC_MONTH);
        long monthStart = (long)Math.floor(month * CalendarAstronomer.SYNODIC_MONTH - 1);
        if (days - monthStart >= 25 && moonAge(current) > 0) {
            // If we're near the end of the month, assume next month and search backwards
            month++;
        }
        // Find out the last time that the new moon was actually visible at this longitude
        // This returns midnight the night that the moon was visible at sunset.
        while ((monthStart = trueMonthStart(month)) > days) {
            // If it was after the date in question, back up a month and try again
            month--;
        }
        // Convert the 0-based elapsed-month count into (year, month-of-year);
        // the second expression keeps month in 0..11 even for negative counts.
        int year = month >= 0 ? ((month / 12) + 1) : ((month + 1 ) / 12);
        month = ((month % 12) + 12 ) % 12;
        yearConsumer.accept(year);
        monthConsumer.accept(month);
        dayOfMonthConsumer.accept((int)(days - monthStart(year, month)) + 1);
        dayOfYearConsumer.accept((int)(days - yearStart(year) + 1));
    }
};
/**
 * Algorithm implementing the rules for CalculationType.ISLAMIC_CIVIL: the
 * fixed-cycle tabular calendar with the Friday (civil) epoch. All values
 * are pure arithmetic; no astronomical computation is involved.
 */
static private class CivilAlgorithm implements Algorithm {
    public boolean isCivil() {
        // This is the one algorithm that represents the civil calendar.
        return true;
    }
    public CalculationType getType() {
        return CalculationType.ISLAMIC_CIVIL;
    }
    public long epoch() {
        return CIVIL_EPOCH;
    }
    public long yearStart(int year) {
        // 354 days per common year, plus the leap days accumulated so far;
        // 11 leap days are spread across each 30-year cycle.
        int commonDays = (year - 1) * 354;
        long leapDays = (long)Math.floor((3 + 11 * year) / 30.0);
        return commonDays + leapDays;
    }
    public long monthStart(int year, int month) {
        // Fold whole years out of the month value so the arithmetic below
        // sees a month index in range (month may be out of bounds after an
        // add operation).
        int normalizedYear = year + month / 12;
        int monthOfYear = month % 12;
        // Months alternate 30/29 days, hence the ceiling of 29.5 per month.
        return (long)Math.ceil(29.5 * monthOfYear) + yearStart(normalizedYear);
    }
    public int monthLength(int year, int month) {
        // Even-indexed months have 30 days, odd-indexed months 29; the final
        // month (Dhu al-Hijjah) gains one more day in leap years.
        int length = (month % 2 == 0) ? 30 : 29;
        if (month == DHU_AL_HIJJAH && civilLeapYear(year)) {
            ++length;
        }
        return length;
    }
    public int yearLength(int year) {
        // 355 days in a leap year, 354 otherwise.
        return civilLeapYear(year) ? 355 : 354;
    }
    public void compute(long julianDays, long current,
            IntConsumer yearConsumer, IntConsumer monthConsumer,
            IntConsumer dayOfMonthConsumer, IntConsumer dayOfYearConsumer) {
        // Days elapsed since this algorithm's epoch; note that epoch() is
        // virtual, so the TBLA subclass reuses this method unchanged.
        long days = julianDays - epoch();
        // Tabular calendar: year and month follow directly from arithmetic.
        int year = (int)Math.floor( (30 * days + 10646) / 10631.0 );
        int month = (int)Math.ceil((days - 29 - yearStart(year)) / 29.5 );
        if (month > 11) {
            // The estimate can overshoot into the next year; clamp it.
            month = 11;
        }
        yearConsumer.accept(year);
        monthConsumer.accept(month);
        dayOfMonthConsumer.accept((int)(days - monthStart(year, month)) + 1);
        dayOfYearConsumer.accept((int)(days - yearStart(year) + 1));
    }
};
/**
 * Algorithm which implements the rules for CalculationType.ISLAMIC_TBLA
 * (tabular, astronomical epoch). Identical arithmetic to CivilAlgorithm,
 * except that it returns false for isCivil and uses the Thursday epoch,
 * which is one Julian day earlier than the civil Friday epoch.
 */
static private class TBLAAlgorithm extends CivilAlgorithm {
    public boolean isCivil() {
        return false;
    }
    public CalculationType getType() {
        return CalculationType.ISLAMIC_TBLA;
    }
    public long epoch() {
        // All other computations are inherited and call this override.
        return ASTRONOMICAL_EPOCH;
    }
};
/**
 * Algorithm which implements the rules for CalculationType.ISLAMIC_UMALQURA.
 * Month lengths for years 1300-1600 AH come from the UMALQURA_MONTHLENGTH
 * bit table; outside that range every method falls back to the civil
 * (tabular) algorithm.
 */
static private class UmalquraAlgorithm implements Algorithm {
    public boolean isCivil() {
        return false;
    }
    public CalculationType getType() {
        return CalculationType.ISLAMIC_UMALQURA;
    }
    public long epoch() {
        // Shares the Friday (civil) epoch.
        return CIVIL_EPOCH;
    }
    public long yearStart(int year) {
        if (year < UMALQURA_YEAR_START || year > UMALQURA_YEAR_END) {
            // Outside the table's coverage: use the tabular approximation.
            return CIVIL_ALGORITHM.yearStart(year);
        }
        int index = year - UMALQURA_YEAR_START;
        // rounded least-squares fit of the dates previously calculated from UMALQURA_MONTHLENGTH iteration
        int yrStartLinearEstimate = (int)((354.36720 * index) + 460322.05 + 0.5);
        // need a slight correction to some
        return yrStartLinearEstimate + UMALQURA_YEAR_START_ESTIMATE_FIX[index];
    }
    public long monthStart(int year, int month) {
        // Normalize year/month in case month is outside the normal bounds, which may occur
        // in the case of an add operation
        year += month / 12;
        month %= 12;
        if (year < UMALQURA_YEAR_START) {
            return CIVIL_ALGORITHM.monthStart(year, month);
        }
        // Start from the year boundary and add each preceding month's length.
        long ms = yearStart(year);
        for(int i=0; i< month; i++) {
            ms+= monthLength(year, i);
        }
        return ms;
    }
    public int monthLength(int year, int month) {
        if (year < UMALQURA_YEAR_START || year > UMALQURA_YEAR_END) {
            return CIVIL_ALGORITHM.monthLength(year, month);
        }
        int index = (year - UMALQURA_YEAR_START);     // calculate year offset into bit map array
        int mask = (0x01 << (11 - month));            // set mask for bit corresponding to month
        // A set bit in the table means a 30-day month, clear means 29 days.
        if((UMALQURA_MONTHLENGTH[index] & mask) != 0) {
            return 30;
        }
        return 29;
    }
    public int yearLength(int year) {
        if (year < UMALQURA_YEAR_START || year > UMALQURA_YEAR_END) {
            return CIVIL_ALGORITHM.yearLength(year);
        }
        // Sum the twelve table-driven month lengths.
        int length = 0;
        for(int i = 0; i < 12; i++) {
            length += monthLength(year, i);
        }
        return length;
    }
    public void compute(long julianDays, long current,
            IntConsumer yearConsumer, IntConsumer monthConsumer,
            IntConsumer dayOfMonthConsumer, IntConsumer dayOfYearConsumer) {
        long days = julianDays - epoch();
        if( days < yearStart(UMALQURA_YEAR_START)) {
            // Before the table's coverage: delegate entirely to the civil rules.
            CIVIL_ALGORITHM.compute(julianDays, current,
                yearConsumer, monthConsumer, dayOfMonthConsumer, dayOfYearConsumer);
            return;
        }
        // Estimate a value y which is closer to but not greater than the year.
        // It is the inverse function of the logic inside yearStart() about the
        // linear estimate.
        int year = (int)((days - (460322.05 + 0.5)) / 354.36720) + UMALQURA_YEAR_START - 1;
        int month = 0;
        long monthStart;
        long d = 1;
        // Walk forward year by year until 'days' falls inside the year,
        // then walk the months of that year to locate the month.
        while (d > 0) {
            year++;
            d = days - yearStart(year) +1;
            int yearLength = yearLength(year);
            if (d == yearLength) {
                // Last day of the year: it belongs to the final month.
                month = 11;
                break;
            } else if (d < yearLength) {
                int monthLen = monthLength(year, month);
                for (month = 0; d > monthLen; monthLen = monthLength(year, ++month)) {
                    d -= monthLen;
                }
                break;
            }
        }
        yearConsumer.accept(year);
        monthConsumer.accept(month);
        dayOfMonthConsumer.accept((int)(days - monthStart(year, month)) + 1);
        dayOfYearConsumer.accept((int)(days - yearStart(year) + 1));
    }
};
private static Algorithm ISLAMIC_ALGORITHM;
private static Algorithm CIVIL_ALGORITHM;
private static Algorithm TBLA_ALGORITHM;
private static Algorithm UMALQURA_ALGORITHM;
static {
ISLAMIC_ALGORITHM = new IslamicAlgorithm();
CIVIL_ALGORITHM = new CivilAlgorithm();
TBLA_ALGORITHM = new TBLAAlgorithm();
UMALQURA_ALGORITHM = new UmalquraAlgorithm();
};
/**
 * Determines whether this object uses the fixed-cycle Islamic civil calendar
 * or an approximation of the religious, astronomical calendar.
 *
 * @param beCivil <code>true</code> to use the civil calendar,
 *                <code>false</code> to use the astronomical calendar.
 * @stable ICU 2.8
 * @discouraged ICU 57 use setCalculationType(CalculationType) instead
 */
public void setCivil(boolean beCivil)
{
    CalculationType requested =
            beCivil ? CalculationType.ISLAMIC_CIVIL : CalculationType.ISLAMIC;
    if (cType != requested) {
        // Switching rule sets invalidates every computed field: capture the
        // current instant, swap the rules, reset, and reapply the instant so
        // the fields are recomputed under the new calendar.
        long millis = getTimeInMillis();
        cType = requested;
        algorithm = beCivil ? CIVIL_ALGORITHM : ISLAMIC_ALGORITHM;
        clear();
        setTimeInMillis(millis);
    }
    // Keep the serialized 'civil' flag consistent with the active algorithm.
    civil = algorithm.isCivil();
}
/**
 * Returns <code>true</code> if this object is using the fixed-cycle civil
 * calendar, or <code>false</code> if using the religious, astronomical
 * calendar.
 * @stable ICU 2.8
 * @discouraged ICU 57 use getCalculationType() instead
 */
public boolean isCivil() {
    // Delegate to the active algorithm rather than the serialized flag.
    return algorithm.isCivil();
}
//-------------------------------------------------------------------------
// Minimum / Maximum access functions
//-------------------------------------------------------------------------
// Note: Current IslamicCalendar implementation does not work
// well with negative years.
// Field limits table consumed by handleGetLimit(): rows are indexed by
// calendar field, columns by limit type (minimum, greatest minimum,
// least maximum, maximum). Empty rows defer to the Calendar base class.
private static final int LIMITS[][] = {
    // Minimum  Greatest     Least   Maximum
    //           Minimum   Maximum
    {        0,        0,        0,        0}, // ERA
    {        1,        1,  5000000,  5000000}, // YEAR
    {        0,        0,       11,       11}, // MONTH
    {        1,        1,       50,       51}, // WEEK_OF_YEAR
    {/*                                   */}, // WEEK_OF_MONTH
    {        1,        1,       29,       30}, // DAY_OF_MONTH
    {        1,        1,      354,      355}, // DAY_OF_YEAR
    {/*                                   */}, // DAY_OF_WEEK
    {       -1,       -1,        5,        5}, // DAY_OF_WEEK_IN_MONTH
    {/*                                   */}, // AM_PM
    {/*                                   */}, // HOUR
    {/*                                   */}, // HOUR_OF_DAY
    {/*                                   */}, // MINUTE
    {/*                                   */}, // SECOND
    {/*                                   */}, // MILLISECOND
    {/*                                   */}, // ZONE_OFFSET
    {/*                                   */}, // DST_OFFSET
    {        1,        1,  5000000,  5000000}, // YEAR_WOY
    {/*                                   */}, // DOW_LOCAL
    {        1,        1,  5000000,  5000000}, // EXTENDED_YEAR
    {/*                                   */}, // JULIAN_DAY
    {/*                                   */}, // MILLISECONDS_IN_DAY
    {/*                                   */}, // IS_LEAP_MONTH
    {        0,        0,       11,       11 }, // ORDINAL_MONTH
};
/*
 * Bit map array of month lengths for the Umm al-Qura years 1300-1600 AH:
 * entry [i] describes year (UMALQURA_YEAR_START + i). Within an entry,
 * bit (11 - month) is set when that month has 30 days and clear when it
 * has 29 days (see UmalquraAlgorithm.monthLength()).
 */
private static final int[] UMALQURA_MONTHLENGTH = {
    //* 1300 -1302 */ "1010 1010 1010", "1101 0101 0100", "1110 1100 1001",
                        0x0AAA,           0x0D54,           0x0EC9,
    //* 1303 -1307 */ "0110 1101 0100", "0110 1110 1010", "0011 0110 1100", "1010 1010 1101", "0101 0101 0101",
                        0x06D4,           0x06EA,           0x036C,           0x0AAD,           0x0555,
    //* 1308 -1312 */ "0110 1010 1001", "0111 1001 0010", "1011 1010 1001", "0101 1101 0100", "1010 1101 1010",
                        0x06A9,           0x0792,           0x0BA9,           0x05D4,           0x0ADA,
    //* 1313 -1317 */ "0101 0101 1100", "1101 0010 1101", "0110 1001 0101", "0111 0100 1010", "1011 0101 0100",
                        0x055C,           0x0D2D,           0x0695,           0x074A,           0x0B54,
    //* 1318 -1322 */ "1011 0110 1010", "0101 1010 1101", "0100 1010 1110", "1010 0100 1111", "0101 0001 0111",
                        0x0B6A,           0x05AD,           0x04AE,           0x0A4F,           0x0517,
    //* 1323 -1327 */ "0110 1000 1011", "0110 1010 0101", "1010 1101 0101", "0010 1101 0110", "1001 0101 1011",
                        0x068B,           0x06A5,           0x0AD5,           0x02D6,           0x095B,
    //* 1328 -1332 */ "0100 1001 1101", "1010 0100 1101", "1101 0010 0110", "1101 1001 0101", "0101 1010 1100",
                        0x049D,           0x0A4D,           0x0D26,           0x0D95,           0x05AC,
    //* 1333 -1337 */ "1001 1011 0110", "0010 1011 1010", "1010 0101 1011", "0101 0010 1011", "1010 1001 0101",
                        0x09B6,           0x02BA,           0x0A5B,           0x052B,           0x0A95,
    //* 1338 -1342 */ "0110 1100 1010", "1010 1110 1001", "0010 1111 0100", "1001 0111 0110", "0010 1011 0110",
                        0x06CA,           0x0AE9,           0x02F4,           0x0976,           0x02B6,
    //* 1343 -1347 */ "1001 0101 0110", "1010 1100 1010", "1011 1010 0100", "1011 1101 0010", "0101 1101 1001",
                        0x0956,           0x0ACA,           0x0BA4,           0x0BD2,           0x05D9,
    //* 1348 -1352 */ "0010 1101 1100", "1001 0110 1101", "0101 0100 1101", "1010 1010 0101", "1011 0101 0010",
                        0x02DC,           0x096D,           0x054D,           0x0AA5,           0x0B52,
    //* 1353 -1357 */ "1011 1010 0101", "0101 1011 0100", "1001 1011 0110", "0101 0101 0111", "0010 1001 0111",
                        0x0BA5,           0x05B4,           0x09B6,           0x0557,           0x0297,
    //* 1358 -1362 */ "0101 0100 1011", "0110 1010 0011", "0111 0101 0010", "1011 0110 0101", "0101 0110 1010",
                        0x054B,           0x06A3,           0x0752,           0x0B65,           0x056A,
    //* 1363 -1367 */ "1010 1010 1011", "0101 0010 1011", "1100 1001 0101", "1101 0100 1010", "1101 1010 0101",
                        0x0AAB,           0x052B,           0x0C95,           0x0D4A,           0x0DA5,
    //* 1368 -1372 */ "0101 1100 1010", "1010 1101 0110", "1001 0101 0111", "0100 1010 1011", "1001 0100 1011",
                        0x05CA,           0x0AD6,           0x0957,           0x04AB,           0x094B,
    //* 1373 -1377 */ "1010 1010 0101", "1011 0101 0010", "1011 0110 1010", "0101 0111 0101", "0010 0111 0110",
                        0x0AA5,           0x0B52,           0x0B6A,           0x0575,           0x0276,
    //* 1378 -1382 */ "1000 1011 0111", "0100 0101 1011", "0101 0101 0101", "0101 1010 1001", "0101 1011 0100",
                        0x08B7,           0x045B,           0x0555,           0x05A9,           0x05B4,
    //* 1383 -1387 */ "1001 1101 1010", "0100 1101 1101", "0010 0110 1110", "1001 0011 0110", "1010 1010 1010",
                        0x09DA,           0x04DD,           0x026E,           0x0936,           0x0AAA,
    //* 1388 -1392 */ "1101 0101 0100", "1101 1011 0010", "0101 1101 0101", "0010 1101 1010", "1001 0101 1011",
                        0x0D54,           0x0DB2,           0x05D5,           0x02DA,           0x095B,
    //* 1393 -1397 */ "0100 1010 1011", "1010 0101 0101", "1011 0100 1001", "1011 0110 0100", "1011 0111 0001",
                        0x04AB,           0x0A55,           0x0B49,           0x0B64,           0x0B71,
    //* 1398 -1402 */ "0101 1011 0100", "1010 1011 0101", "1010 0101 0101", "1101 0010 0101", "1110 1001 0010",
                        0x05B4,           0x0AB5,           0x0A55,           0x0D25,           0x0E92,
    //* 1403 -1407 */ "1110 1100 1001", "0110 1101 0100", "1010 1110 1001", "1001 0110 1011", "0100 1010 1011",
                        0x0EC9,           0x06D4,           0x0AE9,           0x096B,           0x04AB,
    //* 1408 -1412 */ "1010 1001 0011", "1101 0100 1001", "1101 1010 0100", "1101 1011 0010", "1010 1011 1001",
                        0x0A93,           0x0D49,           0x0DA4,           0x0DB2,           0x0AB9,
    //* 1413 -1417 */ "0100 1011 1010", "1010 0101 1011", "0101 0010 1011", "1010 1001 0101", "1011 0010 1010",
                        0x04BA,           0x0A5B,           0x052B,           0x0A95,           0x0B2A,
    //* 1418 -1422 */ "1011 0101 0101", "0101 0101 1100", "0100 1011 1101", "0010 0011 1101", "1001 0001 1101",
                        0x0B55,           0x055C,           0x04BD,           0x023D,           0x091D,
    //* 1423 -1427 */ "1010 1001 0101", "1011 0100 1010", "1011 0101 1010", "0101 0110 1101", "0010 1011 0110",
                        0x0A95,           0x0B4A,           0x0B5A,           0x056D,           0x02B6,
    //* 1428 -1432 */ "1001 0011 1011", "0100 1001 1011", "0110 0101 0101", "0110 1010 1001", "0111 0101 0100",
                        0x093B,           0x049B,           0x0655,           0x06A9,           0x0754,
    //* 1433 -1437 */ "1011 0110 1010", "0101 0110 1100", "1010 1010 1101", "0101 0101 0101", "1011 0010 1001",
                        0x0B6A,           0x056C,           0x0AAD,           0x0555,           0x0B29,
    //* 1438 -1442 */ "1011 1001 0010", "1011 1010 1001", "0101 1101 0100", "1010 1101 1010", "0101 0101 1010",
                        0x0B92,           0x0BA9,           0x05D4,           0x0ADA,           0x055A,
    //* 1443 -1447 */ "1010 1010 1011", "0101 1001 0101", "0111 0100 1001", "0111 0110 0100", "1011 1010 1010",
                        0x0AAB,           0x0595,           0x0749,           0x0764,           0x0BAA,
    //* 1448 -1452 */ "0101 1011 0101", "0010 1011 0110", "1010 0101 0110", "1110 0100 1101", "1011 0010 0101",
                        0x05B5,           0x02B6,           0x0A56,           0x0E4D,           0x0B25,
    //* 1453 -1457 */ "1011 0101 0010", "1011 0110 1010", "0101 1010 1101", "0010 1010 1110", "1001 0010 1111",
                        0x0B52,           0x0B6A,           0x05AD,           0x02AE,           0x092F,
    //* 1458 -1462 */ "0100 1001 0111", "0110 0100 1011", "0110 1010 0101", "0110 1010 1100", "1010 1101 0110",
                        0x0497,           0x064B,           0x06A5,           0x06AC,           0x0AD6,
    //* 1463 -1467 */ "0101 0101 1101", "0100 1001 1101", "1010 0100 1101", "1101 0001 0110", "1101 1001 0101",
                        0x055D,           0x049D,           0x0A4D,           0x0D16,           0x0D95,
    //* 1468 -1472 */ "0101 1010 1010", "0101 1011 0101", "0010 1101 1010", "1001 0101 1011", "0100 1010 1101",
                        0x05AA,           0x05B5,           0x02DA,           0x095B,           0x04AD,
    //* 1473 -1477 */ "0101 1001 0101", "0110 1100 1010", "0110 1110 0100", "1010 1110 1010", "0100 1111 0101",
                        0x0595,           0x06CA,           0x06E4,           0x0AEA,           0x04F5,
    //* 1478 -1482 */ "0010 1011 0110", "1001 0101 0110", "1010 1010 1010", "1011 0101 0100", "1011 1101 0010",
                        0x02B6,           0x0956,           0x0AAA,           0x0B54,           0x0BD2,
    //* 1483 -1487 */ "0101 1101 1001", "0010 1110 1010", "1001 0110 1101", "0100 1010 1101", "1010 1001 0101",
                        0x05D9,           0x02EA,           0x096D,           0x04AD,           0x0A95,
    //* 1488 -1492 */ "1011 0100 1010", "1011 1010 0101", "0101 1011 0010", "1001 1011 0101", "0100 1101 0110",
                        0x0B4A,           0x0BA5,           0x05B2,           0x09B5,           0x04D6,
    //* 1493 -1497 */ "1010 1001 0111", "0101 0100 0111", "0110 1001 0011", "0111 0100 1001", "1011 0101 0101",
                        0x0A97,           0x0547,           0x0693,           0x0749,           0x0B55,
    //* 1498 -1508 */ "0101 0110 1010", "1010 0110 1011", "0101 0010 1011", "1010 1000 1011", "1101 0100 0110", "1101 1010 0011", "0101 1100 1010", "1010 1101 0110", "0100 1101 1011", "0010 0110 1011", "1001 0100 1011",
                        0x056A,           0x0A6B,           0x052B,           0x0A8B,           0x0D46,           0x0DA3,           0x05CA,           0x0AD6,           0x04DB,           0x026B,           0x094B,
    //* 1509 -1519 */ "1010 1010 0101", "1011 0101 0010", "1011 0110 1001", "0101 0111 0101", "0001 0111 0110", "1000 1011 0111", "0010 0101 1011", "0101 0010 1011", "0101 0110 0101", "0101 1011 0100", "1001 1101 1010",
                        0x0AA5,           0x0B52,           0x0B69,           0x0575,           0x0176,           0x08B7,           0x025B,           0x052B,           0x0565,           0x05B4,           0x09DA,
    //* 1520 -1530 */ "0100 1110 1101", "0001 0110 1101", "1000 1011 0110", "1010 1010 0110", "1101 0101 0010", "1101 1010 1001", "0101 1101 0100", "1010 1101 1010", "1001 0101 1011", "0100 1010 1011", "0110 0101 0011",
                        0x04ED,           0x016D,           0x08B6,           0x0AA6,           0x0D52,           0x0DA9,           0x05D4,           0x0ADA,           0x095B,           0x04AB,           0x0653,
    //* 1531 -1541 */ "0111 0010 1001", "0111 0110 0010", "1011 1010 1001", "0101 1011 0010", "1010 1011 0101", "0101 0101 0101", "1011 0010 0101", "1101 1001 0010", "1110 1100 1001", "0110 1101 0010", "1010 1110 1001",
                        0x0729,           0x0762,           0x0BA9,           0x05B2,           0x0AB5,           0x0555,           0x0B25,           0x0D92,           0x0EC9,           0x06D2,           0x0AE9,
    //* 1542 -1552 */ "0101 0110 1011", "0100 1010 1011", "1010 0101 0101", "1101 0010 1001", "1101 0101 0100", "1101 1010 1010", "1001 1011 0101", "0100 1011 1010", "1010 0011 1011", "0100 1001 1011", "1010 0100 1101",
                        0x056B,           0x04AB,           0x0A55,           0x0D29,           0x0D54,           0x0DAA,           0x09B5,           0x04BA,           0x0A3B,           0x049B,           0x0A4D,
    //* 1553 -1563 */ "1010 1010 1010", "1010 1101 0101", "0010 1101 1010", "1001 0101 1101", "0100 0101 1110", "1010 0010 1110", "1100 1001 1010", "1101 0101 0101", "0110 1011 0010", "0110 1011 1001", "0100 1011 1010",
                        0x0AAA,           0x0AD5,           0x02DA,           0x095D,           0x045E,           0x0A2E,           0x0C9A,           0x0D55,           0x06B2,           0x06B9,           0x04BA,
    //* 1564 -1574 */ "1010 0101 1101", "0101 0010 1101", "1010 1001 0101", "1011 0101 0010", "1011 1010 1000", "1011 1011 0100", "0101 1011 1001", "0010 1101 1010", "1001 0101 1010", "1011 0100 1010", "1101 1010 0100",
                        0x0A5D,           0x052D,           0x0A95,           0x0B52,           0x0BA8,           0x0BB4,           0x05B9,           0x02DA,           0x095A,           0x0B4A,           0x0DA4,
    //* 1575 -1585 */ "1110 1101 0001", "0110 1110 1000", "1011 0110 1010", "0101 0110 1101", "0101 0011 0101", "0110 1001 0101", "1101 0100 1010", "1101 1010 1000", "1101 1101 0100", "0110 1101 1010", "0101 0101 1011",
                        0x0ED1,           0x06E8,           0x0B6A,           0x056D,           0x0535,           0x0695,           0x0D4A,           0x0DA8,           0x0DD4,           0x06DA,           0x055B,
    //* 1586 -1596 */ "0010 1001 1101", "0110 0010 1011", "1011 0001 0101", "1011 0100 1010", "1011 1001 0101", "0101 1010 1010", "1010 1010 1110", "1001 0010 1110", "1100 1000 1111", "0101 0010 0111", "0110 1001 0101",
                        0x029D,           0x062B,           0x0B15,           0x0B4A,           0x0B95,           0x05AA,           0x0AAE,           0x092E,           0x0C8F,           0x0527,           0x0695,
    //* 1597 -1600 */ "0110 1010 1010", "1010 1101 0110", "0101 0101 1101", "0010 1001 1101", };
                        0x06AA,           0x0AD6,           0x055D,           0x029D
};
// First and last years (inclusive) covered by UMALQURA_MONTHLENGTH and
// UMALQURA_YEAR_START_ESTIMATE_FIX; outside this range the Umm al-Qura
// algorithm falls back to the civil (tabular) rules.
private static final int UMALQURA_YEAR_START = 1300;
private static final int UMALQURA_YEAR_END = 1600;
/**
 * Returns the limit of the given field and limit type from the LIMITS table.
 * @stable ICU 2.8
 */
@Override
protected int handleGetLimit(int field, int limitType) {
    return LIMITS[field][limitType];
}
//-------------------------------------------------------------------------
// Assorted calculation utilities
//
// we could compress this down more if we need to
// Per-year corrections (in days) applied on top of the rounded linear
// estimate computed in UmalquraAlgorithm.yearStart(); entry [i] belongs to
// year (UMALQURA_YEAR_START + i).
private static final byte[] UMALQURA_YEAR_START_ESTIMATE_FIX = {
     0,  0, -1,  0, -1,  0,  0,  0,  0,  0, // 1300..
    -1,  0,  0,  0,  0,  0,  0,  0, -1,  0, // 1310..
     1,  0,  1,  1,  0,  0,  0,  0,  1,  0, // 1320..
     0,  0,  0,  0,  0,  0,  1,  0,  0,  0, // 1330..
     0,  0,  1,  0,  0, -1, -1,  0,  0,  0, // 1340..
     1,  0,  0, -1,  0,  0,  0,  1,  1,  0, // 1350..
     0,  0,  0,  0,  0,  0,  0, -1,  0,  0, // 1360..
     0,  1,  1,  0,  0, -1,  0,  1,  0,  1, // 1370..
     1,  0,  0, -1,  0,  1,  0,  0,  0, -1, // 1380..
     0,  1,  0,  1,  0,  0,  0, -1,  0,  0, // 1390..
     0,  0, -1, -1,  0, -1,  0,  1,  0,  0, // 1400..
     0, -1,  0,  0,  0,  1,  0,  0,  0,  0, // 1410..
     0,  1,  0,  0, -1, -1,  0,  0,  0,  1, // 1420..
     0,  0, -1, -1,  0, -1,  0,  0, -1, -1, // 1430..
     0, -1,  0, -1,  0,  0, -1, -1,  0,  0, // 1440..
     0,  0,  0,  0, -1,  0,  1,  0,  1,  1, // 1450..
     0,  0, -1,  0,  1,  0,  0,  0,  0,  0, // 1460..
     1,  0,  1,  0,  0,  0, -1,  0,  1,  0, // 1470..
     0, -1, -1,  0,  0,  0,  1,  0,  0,  0, // 1480..
     0,  0,  0,  0,  1,  0,  0,  0,  0,  0, // 1490..
     1,  0,  0, -1,  0,  0,  0,  1,  1,  0, // 1500..
     0, -1,  0,  1,  0,  1,  1,  0,  0,  0, // 1510..
     0,  1,  0,  0,  0, -1,  0,  0,  0,  1, // 1520..
     0,  0,  0, -1,  0,  0,  0,  0,  0, -1, // 1530..
     0, -1,  0,  1,  0,  0,  0, -1,  0,  1, // 1540..
     0,  1,  0,  0,  0,  0,  0,  1,  0,  0, // 1550..
    -1,  0,  0,  0,  0,  1,  0,  0,  0, -1, // 1560..
     0,  0,  0,  0, -1, -1,  0, -1,  0,  1, // 1570..
     0,  0, -1, -1,  0,  0,  1,  1,  0,  0, // 1580..
    -1,  0,  0,  0,  0,  1,  0,  0,  0,  0, // 1590..
     1 // 1600
};
// Unused code - Alan 2003-05
// /**
// * Find the day of the week for a given day
// *
// * @param day The # of days since the start of the Islamic calendar.
// */
// // private and uncalled, perhaps not used yet?
// private static final int absoluteDayToDayOfWeek(long day)
// {
// // Calculate the day of the week.
// // This relies on the fact that the epoch was a Thursday.
// int dayOfWeek = (int)(day + THURSDAY) % 7 + SUNDAY;
// if (dayOfWeek < 0) {
// dayOfWeek += 7;
// }
// return dayOfWeek;
// }
/**
 * Determine whether a year is a leap year in the Islamic civil calendar,
 * i.e. one of the 11 years in each 30-year cycle that carry an extra day.
 */
private final static boolean civilLeapYear(int year)
{
    int cyclePhase = (11 * year + 14) % 30;
    return cyclePhase < 11;
}
/**
 * Return the day # on which the given year starts.  Days are counted
 * from the Hijri epoch, origin 0. Delegates to the active algorithm.
 */
private long yearStart(int year) {
    return algorithm.yearStart(year);
}
/**
 * Find the day number on which a particular month of the true/lunar
 * Islamic calendar starts.  Results are memoized in the shared
 * {@code cache}, since each probe of the moon's age constructs a
 * {@code CalendarAstronomer} and is expensive.
 *
 * @param month The month in question, origin 0 from the Hijri epoch
 *
 * @return The day number on which the given month starts.
 */
private static final long trueMonthStart(long month)
{
    long start = cache.get(month);
    if (start == CalendarCache.EMPTY)
    {
        // Make a guess at when the month started, using the average length
        long origin = HIJRA_MILLIS
            + (long)Math.floor(month * CalendarAstronomer.SYNODIC_MONTH) * ONE_DAY;
        // Age of the moon at the estimated start; >= 0 means the new moon
        // has already occurred.  (Fix: reuse this value instead of calling
        // the expensive moonAge(origin) a second time in the test below.)
        double age = moonAge(origin);
        if (age >= 0) {
            // The month has already started; step backwards one day at a
            // time to the last day before the new moon.
            do {
                origin -= ONE_DAY;
                age = moonAge(origin);
            } while (age >= 0);
        }
        else {
            // Preceding month has not ended yet; step forwards to the
            // first day on which the new moon has occurred.
            do {
                origin += ONE_DAY;
                age = moonAge(origin);
            } while (age < 0);
        }
        start = (origin - HIJRA_MILLIS) / ONE_DAY + 1;
        cache.put(month, start);
    }
    return start;
}
/**
 * Return the "age" of the moon at the given time; this is the difference
 * in ecliptic latitude between the moon and the sun.  (NOTE(review): moon
 * age is conventionally the difference in ecliptic <em>longitude</em> —
 * confirm against CalendarAstronomer.getMoonAge.)  This method simply
 * calls CalendarAstronomer.moonAge, converts to degrees,
 * and adjusts the result to be in the range [-180, 180].
 *
 * @param time  The time at which the moon's age is desired,
 *              in millis since 1/1/1970.
 */
static final double moonAge(long time)
{
    // Constructs a new CalendarAstronomer on every call; callers cache
    // month starts (see trueMonthStart) to limit the cost.
    double age = (new CalendarAstronomer(time)).getMoonAge();
    // Convert to degrees and normalize...
    age = age * 180 / Math.PI;
    if (age > 180) {
        age = age - 360;
    }
    return age;
}
//-------------------------------------------------------------------------
// Internal data....
//
// Cache of computed true month starts for the astronomical (ISLAMIC)
// algorithm, keyed by months-since-epoch; shared by all instances.
private static CalendarCache cache = new CalendarCache();
/**
 * <code>true</code> if this object uses the fixed-cycle Islamic civil calendar,
 * and <code>false</code> if it approximates the true religious calendar using
 * astronomical calculations for the time of the new moon.
 *
 * <p>Retained for serialization compatibility; kept in sync with the
 * active algorithm (see setCivil()).
 *
 * @serial
 */
private boolean civil = true;
/**
 * determines the type of calculation to use for this instance
 *
 * @serial
 * @stable ICU 52
 */
private CalculationType cType = CalculationType.ISLAMIC_CIVIL;
// Strategy object implementing cType's rules. Transient because it is
// derivable from cType (presumably reestablished on deserialization —
// confirm against this class's readObject). The default matches the
// default cType above.
private transient Algorithm algorithm = CIVIL_ALGORITHM;
//----------------------------------------------------------------------
// Calendar framework
//----------------------------------------------------------------------
/**
 * Return the length (in days) of the given month, per the active algorithm.
 *
 * @param extendedYear  The hijri year
 * @param month The hijri month, 0-based
 * @stable ICU 2.8
 */
@Override
protected int handleGetMonthLength(int extendedYear, int month) {
    return algorithm.monthLength(extendedYear, month);
}
/**
 * Return the number of days in the given Islamic year, per the active
 * algorithm (354 or 355 for the tabular types; table- or
 * astronomy-derived otherwise).
 * @stable ICU 2.8
 */
@Override
protected int handleGetYearLength(int extendedYear) {
    return algorithm.yearLength(extendedYear);
}
//-------------------------------------------------------------------------
// Functions for converting from field values to milliseconds....
//-------------------------------------------------------------------------
// Return JD of start of given month/year
// Calendar says:
// Get the Julian day of the day BEFORE the start of this year.
// If useMonth is true, get the day before the start of the month.
// Hence the -1
/**
 * Returns the Julian day of the day before the start of the given
 * year/month, as the Calendar framework requires (hence the -1).
 * @stable ICU 2.8
 */
@Override
protected int handleComputeMonthStart(int eyear, int month, boolean useMonth) {
    // monthStart() is epoch-relative, so add the algorithm's epoch back in.
    return (int)(algorithm.monthStart(eyear, month) + algorithm.epoch()- 1);
}
//-------------------------------------------------------------------------
// Functions for converting from milliseconds to field values
//-------------------------------------------------------------------------

/**
 * Returns the extended year, taken from whichever of EXTENDED_YEAR and
 * YEAR was set more recently; defaults to year 1 when neither is set.
 * @stable ICU 2.8
 */
@Override
protected int handleGetExtendedYear() {
    final boolean preferExtended = newerField(EXTENDED_YEAR, YEAR) == EXTENDED_YEAR;
    return preferExtended
        ? internalGet(EXTENDED_YEAR, 1)  // Default to year 1
        : internalGet(YEAR, 1);          // Default to year 1
}
/**
 * Override Calendar to compute several fields specific to the Islamic
 * calendar system. These are:
 *
 * <ul><li>ERA
 * <li>YEAR
 * <li>MONTH
 * <li>DAY_OF_MONTH
 * <li>DAY_OF_YEAR
 * <li>EXTENDED_YEAR</ul>
 *
 * The DAY_OF_WEEK and DOW_LOCAL fields are already set when this
 * method is called. The getGregorianXxx() methods return Gregorian
 * calendar equivalents for the given Julian day.
 * @stable ICU 2.8
 */
@Override
protected void handleComputeFields(int julianDay) {
    // The Algorithm strategy performs the actual conversion and reports each
    // computed field back through the callbacks below.
    algorithm.compute(julianDay, internalGetTimeInMillis(),
        year -> {
            internalSet(ERA, 0);  // ERA is always 0 for this calendar
            internalSet(YEAR, year);
            internalSet(EXTENDED_YEAR, year);
        },
        month -> {
            internalSet(MONTH, month);
            internalSet(ORDINAL_MONTH, month);
        },
        dayOfMonth -> { internalSet(DAY_OF_MONTH, dayOfMonth); },
        dayOfYear -> { internalSet(DAY_OF_YEAR, dayOfYear); });
}
/**
 * enumeration of available calendar calculation types
 *
 * @stable ICU 52
 */
public enum CalculationType {
    /**
     * Religious calendar (astronomical simulation)
     * @stable ICU 52
     */
    ISLAMIC ("islamic"),
    /**
     * Tabular (intercalary years [2,5,7,10,13,16,18,21,24,26,29]) algorithm
     * with civil (Friday) epoch.
     * @stable ICU 52
     */
    ISLAMIC_CIVIL ("islamic-civil"),
    /**
     * Umm al-Qura calendar
     * @stable ICU 52
     */
    ISLAMIC_UMALQURA ("islamic-umalqura"),
    /**
     * Tabular (intercalary years [2,5,7,10,13,16,18,21,24,26,29]) algorithm
     * with astronomical (Thursday) epoch.
     * @stable ICU 52
     */
    ISLAMIC_TBLA ("islamic-tbla");

    // BCP 47 calendar-type keyword value for this calculation type.
    // final: the identifier never changes after construction.
    private final String bcpType;

    CalculationType(String bcpType) {
        this.bcpType = bcpType;
    }

    /** Returns the BCP 47 calendar-type keyword value, e.g. "islamic-civil". */
    String bcpType() {
        return bcpType;
    }
}
/**
 * sets the calculation type for this calendar.
 *
 * @stable ICU 55
 */
public void setCalculationType(CalculationType type) {
    cType = type;
    // Select the Algorithm strategy that implements the requested type.
    switch (cType) {
    case ISLAMIC_UMALQURA:
        algorithm = UMALQURA_ALGORITHM;
        break;
    case ISLAMIC:
        algorithm = ISLAMIC_ALGORITHM;
        break;
    case ISLAMIC_TBLA:
        algorithm = TBLA_ALGORITHM;
        break;
    case ISLAMIC_CIVIL:
    default:
        algorithm = CIVIL_ALGORITHM;
        break;
    }
    // Keep the legacy serialized "civil" flag consistent with the algorithm.
    civil = algorithm.isCivil();
}
/**
 * gets the calculation type for this calendar.
 *
 * @stable ICU 55
 */
public CalculationType getCalculationType() {
    // Derived from the active algorithm rather than reading cType directly.
    return algorithm.getType();
}
/**
 * Sets the calculation type from the calendar keyword of the given locale.
 */
private void setCalcTypeForLocale(ULocale locale) {
    final String calType = CalendarUtil.getCalendarType(locale);
    if ("islamic-civil".equals(calType)) {
        setCalculationType(CalculationType.ISLAMIC_CIVIL);
    } else if ("islamic-umalqura".equals(calType)) {
        setCalculationType(CalculationType.ISLAMIC_UMALQURA);
    } else if ("islamic-tbla".equals(calType)) {
        setCalculationType(CalculationType.ISLAMIC_TBLA);
    } else if (calType.startsWith("islamic")) {
        // Checked last so it is always the default for any
        // islamic-something-unhandled calendar type.
        setCalculationType(CalculationType.ISLAMIC);
    } else {
        // Default for any non-islamic calendar locale.
        setCalculationType(CalculationType.ISLAMIC_CIVIL);
    }
}
/**
 * {@inheritDoc}
 * @stable ICU 3.8
 */
@Override
public String getType() {
    // BCP 47 calendar type of the active algorithm, e.g. "islamic-civil".
    return algorithm.getType().bcpType();
}
/**
 * Customized deserialization: upgrades serialized forms written before
 * CalculationType existed (using the legacy {@code civil} flag) and
 * re-derives the transient {@code algorithm} field.
 */
private void readObject(ObjectInputStream in) throws IOException,ClassNotFoundException {
    in.defaultReadObject();
    if (cType == null) {
        // The serialized data was created by an ICU version before CalculationType
        // was introduced.
        cType = civil ? CalculationType.ISLAMIC_CIVIL : CalculationType.ISLAMIC;
    }
    // Rebuild the transient algorithm (and resync the civil flag) from cType.
    setCalculationType(cType);
}
//-------------------------------------------------------------------------
// Temporal Calendar API.
//-------------------------------------------------------------------------
/**
 * {@icu} Returns true if the date is in a leap year. Recalculate the current time
 * field values if the time value has been changed by a call to setTime().
 * This method is semantically const, but may alter the object in memory.
 * A "leap year" is a year that contains more days than other years (for
 * solar or lunar calendars) or more months than other years (for lunisolar
 * calendars like Hebrew or Chinese), as defined in the ECMAScript Temporal
 * proposal.
 * @return true if the date in the fields is in a Temporal proposal
 * defined leap year. False otherwise.
 * @draft ICU 74
 */
public boolean inTemporalLeapYear() {
    // A "long" year of 355 days is a leap year; shorter years are common years.
    return getActualMaximum(DAY_OF_YEAR) == 355;
}
//-------------------------------------------------------------------------
// End of Temporal Calendar API
//-------------------------------------------------------------------------
/*
private static CalendarFactory factory;
public static CalendarFactory factory() {
if (factory == null) {
factory = new CalendarFactory() {
public Calendar create(TimeZone tz, ULocale loc) {
return new IslamicCalendar(tz, loc);
}
public String factoryName() {
return "Islamic";
}
};
}
return factory;
}
*/
}
| markusicu/icu | icu4j/main/core/src/main/java/com/ibm/icu/util/IslamicCalendar.java |
1,493 | /*
* Copyright (C) 2015 The Project Lombok Authors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package lombok.core.debug;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Date;
import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.core.Version;
/**
 * We make a number of assumptions in lombok code, and if these assumptions fail, we try to fall back to a 'least bad' scenario. However, we would prefer to
 * just know about these cases, without confronting our users with error messages. The 'fix' is to log such assertion failures to this logger, which promptly
 * ignores them, _unless_ you specifically enable logging them to a file. If you'd like to help out or want to assist in debugging, turn this on.
 */
public class AssertionLogger {
    /** Target log file path; {@code null} means assertion logging is disabled. */
    private static final String LOG_PATH;

    static {
        // The 'lombok.assertion.log' system property wins over the LOMBOK_ASSERTION_LOG
        // environment variable; an empty value explicitly disables logging.
        String log = System.getProperty("lombok.assertion.log", null);
        if (log != null) {
            LOG_PATH = log.isEmpty() ? null : log;
        } else {
            try {
                log = System.getenv("LOMBOK_ASSERTION_LOG");
            } catch (Exception e) {
                // A SecurityManager may forbid reading environment variables.
                log = null;
            }
            LOG_PATH = (log == null || log.isEmpty()) ? null : log;
        }
    }

    private static final AtomicBoolean loggedIntro = new AtomicBoolean(false);
    /** Short random tag to tell apart concurrent processes appending to the same log. */
    private static final String PROCESS_ID = generateProcessId();
    // Note: safe to reference from generateProcessId() during class init because
    // this is a compile-time constant.
    private static final String ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";

    private static String generateProcessId() {
        char[] id = new char[4];
        Random r = new Random();
        for (int i = 0; i < id.length; i++) id[i] = ID_CHARS.charAt(r.nextInt(ID_CHARS.length()));
        return new String(id);
    }

    private static synchronized void logToFile(String msg) {
        if (msg == null) return;
        // try-with-resources: the previous version leaked the stream when write() threw.
        try (OutputStream out = new FileOutputStream(LOG_PATH, true)) {
            out.write(msg.getBytes("UTF-8"));
        } catch (Exception e) {
            throw new RuntimeException("assertion logging can't write to log file", e);
        }
    }

    private static void logIntro() {
        // Write the banner line only once per process.
        if (loggedIntro.getAndSet(true)) return;

        String version;
        try {
            version = Version.getFullVersion();
        } catch (Exception e) {
            version = Version.getVersion();
        }

        logToFile(String.format("{%s} [%s -- START %s]\n", PROCESS_ID, new Date(), version));
    }

    /**
     * Logs {@code message} plus the stack trace of {@code throwable} (if any), then
     * returns {@code throwable} unchanged so callers can rethrow in one expression.
     * A no-op unless assertion logging is enabled.
     */
    public static <T extends Throwable> T assertLog(String message, T throwable) {
        if (LOG_PATH == null) return throwable;

        logIntro();

        if (message == null) message = "(No message)";
        String stackMsg = "";
        if (throwable != null) {
            StringWriter sw = new StringWriter();
            PrintWriter pw = new PrintWriter(sw);
            throwable.printStackTrace(pw);
            pw.close();
            stackMsg = "\n " + sw.toString().replace("\r", "").replace("\n", "\n ").trim();
        }
        logToFile(String.format("{%s} [%ty%<tm%<tdT%<tH%<tM%<tS.%<tL] %s%s\n", PROCESS_ID, new Date(), message, stackMsg));
        return throwable;
    }

    /** Logs {@code message} to the assertion log; no-op unless logging is enabled. */
    public static void assertLog(String message) {
        if (LOG_PATH == null) return;
        assertLog(message, null);
    }
}
| projectlombok/lombok | src/core/lombok/core/debug/AssertionLogger.java |
1,494 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.cassandra.db.rows;
import java.nio.ByteBuffer;
import java.util.Objects;
import org.apache.cassandra.db.Digest;
import org.apache.cassandra.db.DeletionPurger;
import org.apache.cassandra.db.TypeSizes;
import org.apache.cassandra.db.context.CounterContext;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.CollectionType;
import org.apache.cassandra.db.marshal.ValueAccessor;
import org.apache.cassandra.schema.ColumnMetadata;
import org.apache.cassandra.serializers.MarshalException;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.memory.ByteBufferCloner;
/**
 * Base abstract class for {@code Cell} implementations.
 *
 * Unless you have a very good reason not to, every cell implementation
 * should probably extend this class.
 */
public abstract class AbstractCell<V> extends Cell<V>
{
    protected AbstractCell(ColumnMetadata column)
    {
        super(column);
    }

    /** A counter cell is a non-tombstone cell belonging to a counter column. */
    public boolean isCounterCell()
    {
        return !isTombstone() && column.isCounterColumn();
    }

    /** True if the cell has no deletion time, or has a TTL not yet expired at {@code nowInSec}. */
    public boolean isLive(long nowInSec)
    {
        return localDeletionTime() == NO_DELETION_TIME || (ttl() != NO_TTL && nowInSec < localDeletionTime());
    }

    /** A tombstone carries a deletion time but no TTL (an expiring cell carries both). */
    public boolean isTombstone()
    {
        return localDeletionTime() != NO_DELETION_TIME && ttl() == NO_TTL;
    }

    public boolean isExpiring()
    {
        return ttl() != NO_TTL;
    }

    public Cell<?> markCounterLocalToBeCleared()
    {
        if (!isCounterCell())
            return this;

        ByteBuffer value = buffer();
        ByteBuffer marked = CounterContext.instance().markLocalToBeCleared(value);
        // markLocalToBeCleared returns the same buffer instance when nothing needed marking
        return marked == value ? this : new BufferCell(column, timestamp(), ttl(), localDeletionTime(), marked, path());
    }

    public Cell<?> purge(DeletionPurger purger, long nowInSec)
    {
        if (!isLive(nowInSec))
        {
            if (purger.shouldPurge(timestamp(), localDeletionTime()))
                return null;

            // We slightly hijack purging to convert expired but not purgeable columns to tombstones. The reason we do that is
            // that once a column has expired it is equivalent to a tombstone but actually using a tombstone is more compact since
            // we don't keep the column value. The reason we do it here is that 1) it's somewhat related to dealing with tombstones
            // so hopefully not too surprising and 2) we want to this and purging at the same places, so it's simpler/more efficient
            // to do both here.
            if (isExpiring())
            {
                // Note that as long as the expiring column and the tombstone put together live longer than GC grace seconds,
                // we'll fulfil our responsibility to repair. See discussion at
                // http://cassandra-user-incubator-apache-org.3065146.n2.nabble.com/repair-compaction-and-tombstone-rows-td7583481.html
                return BufferCell.tombstone(column, timestamp(), localDeletionTime() - ttl(), path()).purge(purger, nowInSec);
            }
        }
        return this;
    }

    /** Drops the cell when its timestamp is strictly older than {@code timestamp}. */
    public Cell<?> purgeDataOlderThan(long timestamp)
    {
        return this.timestamp() < timestamp ? null : this;
    }

    @Override
    public Cell<?> clone(ByteBufferCloner cloner)
    {
        CellPath path = path();
        return new BufferCell(column, timestamp(), ttl(), localDeletionTime(), cloner.clone(buffer()), path == null ? null : path.clone(cloner));
    }

    // note: while the cell returned may be different, the value is the same, so if the value is offheap it must be referenced inside a guarded context (or copied)
    public Cell<?> updateAllTimestamp(long newTimestamp)
    {
        // Tombstones get newTimestamp - 1 so they still shadow data rewritten at newTimestamp.
        return new BufferCell(column, isTombstone() ? newTimestamp - 1 : newTimestamp, ttl(), localDeletionTime(), buffer(), path());
    }

    public int dataSize()
    {
        CellPath path = path();
        return TypeSizes.sizeof(timestamp())
               + TypeSizes.sizeof(ttl())
               + TypeSizes.sizeof(localDeletionTime())
               + valueSize()
               + (path == null ? 0 : path.dataSize());
    }

    public void digest(Digest digest)
    {
        if (isCounterCell())
            digest.updateWithCounterContext(value(), accessor());
        else
            digest.update(value(), accessor());

        digest.updateWithLong(timestamp())
              .updateWithInt(ttl())
              .updateWithBoolean(isCounterCell());
        if (path() != null)
            path().digest(digest);
    }

    public void validate()
    {
        if (ttl() < 0)
            throw new MarshalException("A TTL should not be negative");
        if (localDeletionTime() < 0)
            throw new MarshalException("A local deletion time should not be negative");
        if (localDeletionTime() == INVALID_DELETION_TIME)
            throw new MarshalException("A local deletion time should not be a legacy overflowed value");
        if (isExpiring() && localDeletionTime() == NO_DELETION_TIME)
            throw new MarshalException("Should not have a TTL without an associated local deletion time");

        // non-frozen UDTs require both the cell path & value to validate,
        // so that logic is pushed down into ColumnMetadata. Tombstone
        // validation is done there too as it also involves the cell path
        // for complex columns
        column().validateCell(this);
    }

    /** Same invalid conditions checked by {@link #validate()}, as a predicate instead of exceptions. */
    public boolean hasInvalidDeletions()
    {
        return ttl() < 0
               || localDeletionTime() == INVALID_DELETION_TIME
               || localDeletionTime() < 0
               || (isExpiring() && localDeletionTime() == NO_DELETION_TIME);
    }

    public long maxTimestamp()
    {
        return timestamp();
    }

    /** Value-based equality across cells with possibly different value accessors. */
    public static <V1, V2> boolean equals(Cell<V1> left, Cell<V2> right)
    {
        return left.column().equals(right.column())
               && left.isCounterCell() == right.isCounterCell()
               && left.timestamp() == right.timestamp()
               && left.ttl() == right.ttl()
               && left.localDeletionTime() == right.localDeletionTime()
               && ValueAccessor.equals(left.value(), left.accessor(), right.value(), right.accessor())
               && Objects.equals(left.path(), right.path());
    }

    @Override
    public boolean equals(Object other)
    {
        if (this == other)
            return true;

        if (!(other instanceof Cell))
            return false;

        return equals(this, (Cell<?>) other);
    }

    @Override
    public int hashCode()
    {
        return Objects.hash(column(), isCounterCell(), timestamp(), ttl(), localDeletionTime(), accessor().hashCode(value()), path());
    }

    @Override
    public String toString()
    {
        if (isCounterCell())
            return String.format("[%s=%d ts=%d]", column().name, CounterContext.instance().total(value(), accessor()), timestamp());

        AbstractType<?> type = column().type;
        if (type instanceof CollectionType && type.isMultiCell())
        {
            CollectionType<?> ct = (CollectionType<?>) type;
            return String.format("[%s[%s]=%s %s]",
                                 column().name,
                                 ct.nameComparator().getString(path().get(0)),
                                 isTombstone() ? "<tombstone>" : ct.valueComparator().getString(value(), accessor()),
                                 livenessInfoString());
        }
        if (isTombstone())
            return String.format("[%s=<tombstone> %s]", column().name, livenessInfoString());
        else
            return String.format("[%s=%s %s]", column().name, safeToString(type), livenessInfoString());
    }

    /** Renders the value via the column type, falling back to hex when the bytes don't parse. */
    private String safeToString(AbstractType<?> type)
    {
        try
        {
            return type.getString(value(), accessor());
        }
        catch (Exception e)
        {
            return "0x" + ByteBufferUtil.bytesToHex(buffer());
        }
    }

    private String livenessInfoString()
    {
        if (isExpiring())
            return String.format("ts=%d ttl=%d ldt=%d", timestamp(), ttl(), localDeletionTime());
        else if (isTombstone())
            return String.format("ts=%d ldt=%d", timestamp(), localDeletionTime());
        else
            return String.format("ts=%d", timestamp());
    }
}
| apache/cassandra | src/java/org/apache/cassandra/db/rows/AbstractCell.java |
1,495 | package me.chanjar.weixin.common.util;
/**
 * Generator for random alphanumeric strings.
 */
public class RandomUtils {
  private static final String RANDOM_STR = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
  // SecureRandom instead of java.util.Random: these strings appear to be used
  // as nonce values in API requests, where predictability would be a weakness.
  private static final java.security.SecureRandom RANDOM = new java.security.SecureRandom();

  /**
   * Returns a random 16-character string drawn from [a-zA-Z0-9].
   */
  public static String getRandomStr() {
    StringBuilder sb = new StringBuilder(16);
    for (int i = 0; i < 16; i++) {
      sb.append(RANDOM_STR.charAt(RANDOM.nextInt(RANDOM_STR.length())));
    }
    return sb.toString();
  }
}
| Wechat-Group/WxJava | weixin-java-common/src/main/java/me/chanjar/weixin/common/util/RandomUtils.java |
1,496 | package replicatorg.drivers.gen3;
import java.util.EnumMap;
import java.util.Map;
import java.util.Vector;
import java.util.logging.Level;
import org.w3c.dom.Element;
import replicatorg.app.Base;
import replicatorg.drivers.RetryException;
import replicatorg.machine.model.AxisId;
import replicatorg.machine.model.MachineModel;
import replicatorg.machine.model.ToolModel;
import replicatorg.util.Point5d;
/**
 * Driver for MakerBot 4th-generation electronics (Thing-O-Matic / CupCake CNC),
 * built on the Sanguino3G packet protocol. On these machines an extruder is
 * driven by a stepper motor that "hijacks" a motion axis (declared per-tool via
 * the "stepper_axis" attribute in the machine XML), and the legacy DC-motor
 * tool commands are reused to control the stepper extruder's cooling fan.
 */
public class Makerbot4GDriver extends Sanguino3GDriver {

    // Tracks whether the stepper-extruder cooling fan is currently on, so
    // enableStepperExtruderFan() can skip redundant packets.
    private boolean stepperExtruderFanEnabled = false;

    public String getDriverName() {
        return "Makerbot4G";
    }

    public void reset() {
        // We should poll the machine for it's state here, but it is more important to have the
        // fan on than off.
        stepperExtruderFanEnabled = false;

        super.reset();
    }

    public void stop(boolean abort) {
        // Record the toolstate as off, so we don't excite the extruder motor in future moves.
        machine.currentTool().disableMotor();

        // We should stop the fan here, but it will be stopped for us by the super.
        stepperExtruderFanEnabled = false;

        super.stop(abort);
    }

    /**
     * Returns the hijacked extruder-stepper axis for the given toolhead, as a
     * zero-or-one element iterable (empty when the tool's axis is not hijacked).
     */
    private Iterable<AxisId> getHijackedAxes(int toolhead){
        Vector<AxisId> axes = new Vector<AxisId>();
        AxisId toolheadAxis = machine.getTool(toolhead).getMotorStepperAxis();
        if( extruderHijackedMap.containsKey( toolheadAxis ) )
            axes.add(toolheadAxis);
        return axes;
    }

    // /**
    //  * Returns the hijacked axes for the current tool.
    //  */
    // @Deprecated
    // private Iterable<AxisId> getHijackedAxes() {
    // Vector<AxisId> axes = new Vector<AxisId>();
    //
    // for ( Map.Entry<AxisId,ToolModel> entry : stepExtruderMap.entrySet()) {
    // ToolModel curTool = machine.currentTool();
    // if (curTool.equals(entry.getValue())) {
    // axes.add(curTool.getMotorStepperAxis());
    // }
    // }
    // return axes;
    // }

    /**
     * Returns the hijacked axes for all tools.
     */
    private Iterable<AxisId> getAllHijackedAxes() {
        Vector<AxisId> axes = new Vector<AxisId>();
        for ( Map.Entry<AxisId,ToolModel> entry : extruderHijackedMap.entrySet()) {
            AxisId axis = entry.getKey();
            axes.add(axis);
        }
        return axes;
    }

    /** relies on currentTool too much **/
    @Deprecated
    protected void queueAbsolutePoint(Point5d steps, long micros) throws RetryException {
        // Turn on fan if necessary: any movement on a hijacked axis means the
        // extruder stepper will run, so its cooling fan must be running too.
        int toolhead = machine.currentTool().getIndex();
        for (AxisId axis : getHijackedAxes(toolhead)) {
            if (steps.axis(axis) != 0) {
                enableStepperExtruderFan(true,toolhead);
            }
        }

        PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.QUEUE_POINT_EXT.getCode());

        if (Base.logger.isLoggable(Level.FINE)) {
            Base.logger.log(Level.FINE,"Queued absolute point " + steps + " at "
                    + Long.toString(micros) + " usec.");
        }

        // just add them in now.
        pb.add32((int) steps.x());
        pb.add32((int) steps.y());
        pb.add32((int) steps.z());
        pb.add32((int) steps.a());
        pb.add32((int) steps.b());
        pb.add32((int) micros);

        runCommand(pb.getPacket());
    }

    public void setCurrentPosition(Point5d p) throws RetryException {
        PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.SET_POSITION_EXT.getCode());

        Point5d steps = machine.mmToSteps(p);
        pb.add32((long) steps.x());
        pb.add32((long) steps.y());
        pb.add32((long) steps.z());
        pb.add32((long) steps.a());
        pb.add32((long) steps.b());

        Base.logger.log(Level.FINE,"Set current position to " + p + " (" + steps
                + ")");

        runCommand(pb.getPacket());

        // Set the current position explicitly instead of calling the super, to avoid sending the current position command twice.
        currentPosition.set(p);
        // super.setCurrentPosition(p);
    }

    /**
     * Queries the machine for its current position and converts it back to mm.
     * Returns null when capturing to a file or when the response is invalid.
     */
    protected Point5d reconcilePosition() {
        // If we're writing to a file, we can't actually know what the current position is.
        if (fileCaptureOstream != null) {
            return null;
        }
        PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.GET_POSITION_EXT.getCode());
        PacketResponse pr = runQuery(pb.getPacket());

        Point5d steps;
        try {
            steps = new Point5d(pr.get32(), pr.get32(), pr.get32(), pr.get32(), pr.get32());
        } catch(NullPointerException npe) {
            Base.logger.log(Level.FINEST, "Invalid response packet");
            return null;
        }

        // Base.logger.fine("Reconciling : "+machine.stepsToMM(steps).toString());
        return machine.stepsToMM(steps);
    }

    /**
     * Overridden to not talk to the DC motor driver. This driver is reused for the stepper motor fan
     */
    public void enableMotor() throws RetryException {
        Base.logger.fine("MakerBot4G.enableMotor()");//REMOVE
        machine.currentTool().enableMotor();
    }

    /**
     * Overridden to not talk to the DC motor driver. This driver is reused for the stepper motor fan
     */
    public void disableMotor() throws RetryException {
        Base.logger.fine("MakerBot4G.enableMotor()"); //REMOVE
        machine.currentTool().disableMotor();
    }

    /**
     * Overridden to not talk to the DC motor driver. This driver is reused for the stepper motor fan
     */
    public void setMotorSpeedPWM(int pwm) throws RetryException {
        machine.currentTool().setMotorSpeedPWM(pwm);
    }

    /**
     * Overridden to not talk to the DC motor driver. This driver is reused for the stepper motor fan
     */
    public void setMotorRPM(double rpm, int toolhead) throws RetryException {
        // toolhead == -1 means "use the currently selected tool".
        if (toolhead == -1) {
            machine.currentTool().setMotorSpeedRPM(rpm);
        } else {
            machine.getTool(toolhead).setMotorSpeedRPM(rpm);
        }
    }

    public void enableDrives() throws RetryException {
        enableStepperExtruderFan(true,machine.currentTool().getIndex());

        super.enableDrives();
    }

    public void disableDrives() throws RetryException {
        enableStepperExtruderFan(false,machine.currentTool().getIndex());

        super.disableDrives();
    }

    /**
     * Due to async command dispatch, this version should not be called.
     */
    @Deprecated
    public void enableStepperExtruderFan(boolean enabled) throws RetryException {
        enableStepperExtruderFan(enabled, machine.currentTool().getIndex());
    }

    /**
     * Will turn on/off the stepper extruder fan if it's not already in the correct state.
     *
     */
    public void enableStepperExtruderFan(boolean enabled, int toolIndex) throws RetryException {

        // Skip the packets entirely when the fan is already in the requested state.
        if (this.stepperExtruderFanEnabled == enabled) return;

        // FIXME: Should be called per hijacked axis with the correct tool
        // our flag variable starts with motors enabled.
        byte flags = (byte) (enabled ? 1 : 0);

        // bit 1 determines direction...
        flags |= 2;

        Base.logger.log(Level.FINE,"Stepper Extruder fan w/flags: "
                + Integer.toBinaryString(flags));

        // send it!
        PacketBuilder pb = new PacketBuilder(MotherboardCommandCode.TOOL_COMMAND.getCode());
        pb.add8((byte) toolIndex);
        pb.add8(ToolCommandCode.TOGGLE_MOTOR_1.getCode());
        pb.add8((byte) 1); // payload length
        pb.add8(flags);
        runCommand(pb.getPacket());

        // Always use max PWM
        pb = new PacketBuilder(MotherboardCommandCode.TOOL_COMMAND.getCode());
        pb.add8((byte) toolIndex);
        pb.add8(ToolCommandCode.SET_MOTOR_1_PWM.getCode());
        pb.add8((byte) 1); // length of payload.
        pb.add8((byte) 255);
        runCommand(pb.getPacket());

        this.stepperExtruderFanEnabled = enabled;
    }

    // Map from hijacked motion axis to the tool whose extruder stepper drives it;
    // populated in setMachine() from each tool's "stepper_axis" XML attribute.
    EnumMap<AxisId,ToolModel> extruderHijackedMap = new EnumMap<AxisId,ToolModel>(AxisId.class);

    @Override
    /**
     * When the machine is set for this driver, some toolheads may poach the an extrusion axis.
     */
    public void setMachine(MachineModel m) {
        super.setMachine(m);
        for (ToolModel tm : m.getTools()) {
            Element e = (Element)tm.getXml();
            if (e.hasAttribute("stepper_axis")) {
                final String stepAxisStr = e.getAttribute("stepper_axis");
                try {
                    AxisId axis = AxisId.valueOf(stepAxisStr.toUpperCase());
                    if (m.hasAxis(axis)) {
                        // If we're seizing an axis for an extruder, remove it from the available axes and get
                        // the data associated with that axis.
                        // Ted says: but we don't seem to be removing it from the available axes.
                        // We do that in the 4ga driver, but not here.
                        extruderHijackedMap.put(axis,tm);
                    } else {
                        Base.logger.severe("Tool claims unavailable axis "+axis.name());
                    }
                } catch (IllegalArgumentException iae) {
                    Base.logger.severe("Unintelligible axis designator "+stepAxisStr);
                }
            }
        }
    }

    @Override
    // NOTE(review): method name spelled "Alises" (sic) in the superclass contract —
    // kept as-is for compatibility with callers/overrides.
    public EnumMap<AxisId, String> getAxisAlises() {
        /// Returns a set of Axes that are overridden or hijacked,
        /// and a string to indicate what they are overridden or hijacked for.
        EnumMap<AxisId,String> map = new EnumMap<AxisId,String>(AxisId.class);
        for ( AxisId id : extruderHijackedMap.keySet() ) {
            ToolModel t = extruderHijackedMap.get(id);
            map.put(id,t.getName());
        }
        return map;
    }

    @Override
    public String getMachineType(){ return "Thing-O-Matic/CupCake CNC"; }
}
| Winter-Guerra/ReplicatorG | src/replicatorg/drivers/gen3/Makerbot4GDriver.java |
1,497 | package com.github.binarywang.wxpay.v3.util;
import me.chanjar.weixin.common.error.WxRuntimeException;

import java.nio.charset.StandardCharsets;
import java.security.*;
import java.util.Base64;
import java.util.Random;
/**
 * Signature helpers for the WeChat Pay v3 API.
 *
 * @author cloudx
 */
public class SignUtils {

  // SecureRandom instead of java.util.Random: the generated strings serve as
  // nonces in signed requests and should not be predictable.
  private static final SecureRandom RANDOM = new SecureRandom();

  /**
   * Computes the SHA256withRSA signature of {@code string} and returns it
   * Base64-encoded.
   *
   * @param string     the exact text to sign
   * @param privateKey the RSA private key
   * @return Base64-encoded signature bytes
   * @throws WxRuntimeException if the algorithm is unavailable, the key is
   *                            invalid, or signing fails
   */
  public static String sign(String string, PrivateKey privateKey) {
    try {
      Signature sign = Signature.getInstance("SHA256withRSA");
      sign.initSign(privateKey);
      // Encode explicitly as UTF-8: relying on the platform default charset
      // makes signatures of non-ASCII payloads differ between environments.
      sign.update(string.getBytes(StandardCharsets.UTF_8));
      return Base64.getEncoder().encodeToString(sign.sign());
    } catch (NoSuchAlgorithmException e) {
      throw new WxRuntimeException("当前Java环境不支持SHA256withRSA", e);
    } catch (SignatureException e) {
      throw new WxRuntimeException("签名计算失败", e);
    } catch (InvalidKeyException e) {
      throw new WxRuntimeException("无效的私钥", e);
    }
  }

  /**
   * Generates a random 32-character string.
   */
  public static String genRandomStr() {
    return genRandomStr(32);
  }

  /**
   * Generates a random alphanumeric string.
   *
   * @param length desired string length
   * @return random string of {@code length} characters from [A-Za-z0-9]
   */
  public static String genRandomStr(int length) {
    String base = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    StringBuilder sb = new StringBuilder(length);
    for (int i = 0; i < length; i++) {
      int number = RANDOM.nextInt(base.length());
      sb.append(base.charAt(number));
    }
    return sb.toString();
  }
}
| Wechat-Group/WxJava | weixin-java-pay/src/main/java/com/github/binarywang/wxpay/v3/util/SignUtils.java |
1,498 | package me.chanjar.weixin.common.util.http;
import java.nio.charset.StandardCharsets;
import org.apache.commons.lang3.StringUtils;
/**
 * Percent-encoding in the style of JavaScript's {@code encodeURIComponent}:
 * unreserved characters pass through, everything else is encoded as the
 * uppercase-hex percent-escapes of its UTF-8 bytes.
 */
public class URIUtil {
  // Characters that encodeURIComponent leaves unescaped.
  private static final String ALLOWED_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.!~*'()";

  /**
   * Percent-encodes {@code input} for use as a URI component.
   *
   * @param input text to encode; returned unchanged when null or empty
   * @return the encoded string
   */
  public static String encodeURIComponent(String input) {
    if (input == null || input.isEmpty()) {
      return input;
    }

    int length = input.length();
    StringBuilder out = new StringBuilder(length * 3);
    int i = 0;
    while (i < length) {
      // Walk by code point so supplementary characters (surrogate pairs) are
      // encoded as one UTF-8 sequence; the previous char-by-char substring
      // split pairs and produced "%3F%3F" for e.g. emoji.
      int codePoint = input.codePointAt(i);
      int charCount = Character.charCount(codePoint);
      String chunk = input.substring(i, i + charCount);
      if (charCount == 1 && ALLOWED_CHARS.contains(chunk)) {
        out.append(chunk);
      } else {
        out.append(getHex(chunk.getBytes(StandardCharsets.UTF_8)));
      }
      i += charCount;
    }
    return out.toString();
  }

  /** Renders each byte as an uppercase "%XX" percent-escape. */
  private static String getHex(byte[] buf) {
    StringBuilder o = new StringBuilder(buf.length * 3);
    for (byte aBuf : buf) {
      int n = aBuf & 0xff;
      o.append("%");
      if (n < 0x10) {
        o.append("0");
      }
      o.append(Long.toString(n, 16).toUpperCase());
    }
    return o.toString();
  }
}
| Wechat-Group/WxJava | weixin-java-common/src/main/java/me/chanjar/weixin/common/util/http/URIUtil.java |
1,499 | package com.codeborne.selenide.webdriver;
import com.codeborne.selenide.Browser;
import com.codeborne.selenide.Config;
import org.apache.commons.io.IOUtils;
import org.openqa.selenium.Proxy;
import org.openqa.selenium.SessionNotCreatedException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.firefox.FirefoxOptions;
import org.openqa.selenium.firefox.FirefoxProfile;
import org.openqa.selenium.firefox.GeckoDriverService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.CheckReturnValue;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import javax.annotation.ParametersAreNonnullByDefault;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UncheckedIOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;

import static java.lang.Boolean.parseBoolean;
import static java.lang.Integer.parseInt;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.commons.lang3.StringUtils.isNotEmpty;
@ParametersAreNonnullByDefault
public class FirefoxDriverFactory extends AbstractDriverFactory {
private static final Logger log = LoggerFactory.getLogger(FirefoxDriverFactory.class);
/**
 * Starts a Firefox instance, retrying up to 5 times on
 * SessionNotCreatedException (typically caused by a geckodriver port clash).
 */
@Override
@CheckReturnValue
@Nonnull
public WebDriver create(Config config, Browser browser, @Nullable Proxy proxy, @Nullable File browserDownloadsFolder) {
    SessionNotCreatedException failure = null;
    for (int retries = 0; retries < 5; retries++) {
        try {
            return new FirefoxDriver(createDriverService(config), createCapabilities(config, browser, proxy, browserDownloadsFolder));
        }
        catch (SessionNotCreatedException probablyPortAlreadyUsed) {
            log.error("Failed to start firefox", probablyPortAlreadyUsed);
            failure = probablyPortAlreadyUsed;
        }
    }
    // failure is necessarily non-null here: the loop body ran 5 times without returning.
    throw failure;
}
/** Builds the geckodriver service; withLog presumably wires driver-log settings from config — see AbstractDriverFactory. */
@CheckReturnValue
@Nonnull
protected GeckoDriverService createDriverService(Config config) {
    return withLog(config, new GeckoDriverService.Builder());
}
/**
 * Assembles the FirefoxOptions for a new session: headless flag, custom
 * binary, preferences, downloads folder, common capabilities, an optional
 * profile from system properties, and any injected Firefox prefs.
 */
@Override
@CheckReturnValue
@Nonnull
public FirefoxOptions createCapabilities(Config config, Browser browser,
                                         @Nullable Proxy proxy, @Nullable File browserDownloadsFolder) {
    FirefoxOptions base = new FirefoxOptions();
    setHeadless(config, base);
    setupBrowserBinary(config, base);
    setupPreferences(base);
    setupDownloadsFolder(base, browserDownloadsFolder);

    final FirefoxOptions merged = base.merge(createCommonCapabilities(new FirefoxOptions(), config, browser, proxy));
    transferFirefoxProfileFromSystemProperties(merged).ifPresent(merged::setProfile);
    injectFirefoxPrefs(merged);
    return merged;
}
/** Adds the "-headless" Firefox command-line flag when configured. */
protected void setHeadless(Config config, FirefoxOptions initialOptions) {
    if (config.headless()) {
        initialOptions.addArguments("-headless");
    }
}
/** Points Selenium at a custom Firefox binary when one is configured. */
protected void setupBrowserBinary(Config config, FirefoxOptions firefoxOptions) {
    if (isNotEmpty(config.browserBinary())) {
        log.info("Using browser binary: {}", config.browserBinary());
        firefoxOptions.setBinary(config.browserBinary());
    }
}
protected void setupPreferences(FirefoxOptions firefoxOptions) {
firefoxOptions.addPreference("network.automatic-ntlm-auth.trusted-uris", "http://,https://");
firefoxOptions.addPreference("network.automatic-ntlm-auth.allow-non-fqdn", true);
firefoxOptions.addPreference("network.negotiate-auth.delegation-uris", "http://,https://");
firefoxOptions.addPreference("network.negotiate-auth.trusted-uris", "http://,https://");
firefoxOptions.addPreference("network.http.phishy-userpass-length", 255);
firefoxOptions.addPreference("security.csp.enable", false);
firefoxOptions.addPreference("network.proxy.no_proxies_on", "");
firefoxOptions.addPreference("network.proxy.allow_hijacking_localhost", true);
}
protected void setupDownloadsFolder(FirefoxOptions firefoxOptions, @Nullable File browserDownloadsFolder) {
if (browserDownloadsFolder != null) {
firefoxOptions.addPreference("browser.download.dir", browserDownloadsFolder.getAbsolutePath());
}
firefoxOptions.addPreference("browser.helperApps.neverAsk.saveToDisk", popularContentTypes());
firefoxOptions.addPreference("pdfjs.disabled", true); // disable the built-in viewer
firefoxOptions.addPreference("browser.download.folderList", 2); // 0=Desktop, 1=Downloads, 2="reuse last location"
}
@CheckReturnValue
@Nonnull
protected String popularContentTypes() {
try {
return String.join(";", IOUtils.readLines(getClass().getResourceAsStream("/content-types.properties"), UTF_8));
}
catch (UncheckedIOException e) {
return "text/plain;text/csv;application/zip;application/pdf;application/octet-stream;" +
"application/msword;application/vnd.ms-excel;text/css;text/html";
}
}
@CheckReturnValue
@Nonnull
protected Map<String, String> collectFirefoxProfileFromSystemProperties() {
String prefix = "firefoxprofile.";
Map<String, String> result = new HashMap<>();
for (String key : System.getProperties().stringPropertyNames()) {
if (key.startsWith(prefix)) {
String capability = key.substring(prefix.length());
String value = System.getProperties().getProperty(key);
result.put(capability, value);
}
}
return result;
}
@Nonnull
@CheckReturnValue
protected Optional<FirefoxProfile> transferFirefoxProfileFromSystemProperties(FirefoxOptions firefoxOptions) {
Map<String, String> ffProfile = collectFirefoxProfileFromSystemProperties();
if (ffProfile.isEmpty()) {
return Optional.empty();
}
FirefoxProfile profile = Optional.ofNullable(firefoxOptions.getProfile()).orElseGet(FirefoxProfile::new);
for (Map.Entry<String, String> entry : ffProfile.entrySet()) {
String capability = entry.getKey();
String value = entry.getValue();
log.debug("Use {}={}", capability, value);
setCapability(profile, capability, value);
}
return Optional.of(profile);
}
protected void setCapability(FirefoxProfile profile, String capability, String value) {
if (isBoolean(value)) {
profile.setPreference(capability, parseBoolean(value));
}
else if (isInteger(value)) {
profile.setPreference(capability, parseInt(value));
}
else {
profile.setPreference(capability, value);
}
}
private void injectFirefoxPrefs(FirefoxOptions options) {
if (options.getCapability("moz:firefoxOptions") != null) {
Map<String, Map<String, Object>> mozOptions = cast(options.getCapability("moz:firefoxOptions"));
if (mozOptions.containsKey("prefs")) {
for (Map.Entry<String, Object> pref : mozOptions.get("prefs").entrySet()) {
options.addPreference(pref.getKey(), pref.getValue());
}
}
}
}
}
| selenide/selenide | src/main/java/com/codeborne/selenide/webdriver/FirefoxDriverFactory.java |
1,500 | /**
* Copyright (C) Zhang,Yuexiang (xfeep)
*
*/
package nginx.clojure;
import static nginx.clojure.MiniConstants.NGINX_VER;
import static nginx.clojure.MiniConstants.NGX_HTTP_CLOJURE_REQ_POOL_OFFSET;
import static nginx.clojure.NginxClojureRT.UNSAFE;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import nginx.clojure.java.ArrayMap;
import nginx.clojure.logger.LoggerService;
import nginx.clojure.logger.TinyLogService;
import nginx.clojure.net.NginxClojureSocketFactory;
import nginx.clojure.net.NginxClojureSocketImpl;
import nginx.clojure.wave.JavaAgent;
import sun.misc.Unsafe;
public class NginxClojureRT extends MiniConstants {
public static long[] MEM_INDEX;
public static Thread NGINX_MAIN_THREAD;
/*use it carefully!!*/
public static Unsafe UNSAFE = HackUtils.UNSAFE;
private static List<NginxHandler> HANDLERS = new ArrayList<NginxHandler>();
//mapping clojure code pointer address to clojure code id
// private static Map<Long, Integer> CODE_MAP = new HashMap<Long, Integer>();
public static int MODE = MODE_DEFAULT;
public static ConcurrentHashMap<Long, Object> POSTED_EVENTS_DATA = new ConcurrentHashMap<Long, Object>();
private static ExecutorService eventDispather;
public static CompletionService<WorkerResponseContext> workers;
public static ExecutorService workerExecutorService;
//only for testing, e.g. with lein-ring where no coroutine support
public static ExecutorService threadPoolOnlyForTestingUsage;
public static boolean coroutineEnabled = false;
public static LoggerService log;
public static String processId="-1";
public native static long ngx_palloc(long pool, long size);
public native static long ngx_pcalloc(long pool, long size);
public native static long ngx_array_create(long pool, long n, long size);
public native static long ngx_array_init(long array, long pool, long n, long size);
public native static void ngx_array_destory(long array);
public native static long ngx_array_push_n(long array, long n);
public native static long ngx_list_create(long pool, long n, long size);
public native static long ngx_list_init(long list, long pool, long n, long size);
public native static long ngx_list_push(long list);
public native static long ngx_create_temp_buf(long r, long size);
public native static long ngx_create_temp_buf_by_jstring(long r, String s, int last_buf);
public native static long ngx_create_temp_buf_by_obj(long r, Object obj, long offset, long len, int last_buf);
public native static long ngx_create_file_buf(long r, long file, long name_len, int last_buf);
public native static long ngx_http_set_content_type(long r);
public native static long ngx_http_send_header(long r);
public native static void ngx_http_clear_header_and_reset_ctx_phase(long r, long phase, boolean clearHeader);
/** Convenience overload of the native call that always clears the response headers. */
public static void ngx_http_clear_header_and_reset_ctx_phase(long r, long phase) {
    ngx_http_clear_header_and_reset_ctx_phase(r, phase, true);
}
public native static void ngx_http_ignore_next_response(long r);
public native static long ngx_http_output_filter(long r, long chain);
public native static void ngx_http_finalize_request(long r, long rc);
public native static void ngx_http_filter_finalize_request(long r, long rc);
public native static long ngx_http_discard_request_body(long r);
/**
*
* @param r nginx http request
* @param chain -1 means continue next header filter otherwise continue next body filter
* @return
*/
public native static long ngx_http_filter_continue_next(long r, long chain, long oldChain);
/**
* last_buf can be either of {@link MiniConstants#NGX_CLOJURE_BUF_LAST_OF_NONE} {@link MiniConstants#NGX_CLOJURE_BUF_LAST_OF_CHAIN}, {@link MiniConstants#NGX_CLOJURE_BUF_LAST_OF_RESPONSE}
*/
public native static long ngx_http_clojure_mem_init_ngx_buf(long buf, Object obj, long offset, long len, int last_buf);
public native static long ngx_http_clojure_mem_build_temp_chain(long req, long preChain, Object obj, long offset, long len);
public native static long ngx_http_clojure_mem_build_file_chain(long req, long preChain, Object path, long offset, long len, boolean safe);
public native static long ngx_http_clojure_mem_get_chain_info(long chain, Object buf, long offset, long len);
public native static long ngx_http_clojure_mem_get_obj_addr(Object obj);
public native static long ngx_http_clojure_mem_get_list_size(long l);
public native static long ngx_http_clojure_mem_get_list_item(long l, long i);
public native static long ngx_http_clojure_mem_get_headers_size(long header, int flag);
public native static long ngx_http_clojure_mem_get_headers_items(long header, long i, int flag, Object buf, long off, long maxoff);
public native static void ngx_http_clojure_mem_copy_to_obj(long src, Object obj, long offset, long len);
public native static void ngx_http_clojure_mem_copy_to_addr(Object obj, long offset, long dest, long len);
public native static void ngx_http_clojure_mem_shadow_copy_ngx_str(long s, long t);
public native static long ngx_http_clojure_mem_copy_header_buf(long r, Object buf, long offset, long len);
public native static long ngx_http_clojure_mem_get_header(long headers, Object buf, long nameOffset, long nameLen, long valueOffset, long bufMaxOffset);
/**
* It will return 0 if there's no request body .
* It will return a value < 0 if there's request body file, -value is the length of the file path, and addr(buf, offset) is stored with the path data
* It will return a value > 0 if there's a in-memory request body, value is the length of the body and addr(buf, offset) is stored with a address of the body data
*/
public native static long ngx_http_clojure_mem_get_request_body(long r, Object buf, long offset, long limit);
public native static long ngx_http_clojure_mem_get_variable(long r, long name, long varlenPtr);
public native static long ngx_http_clojure_mem_set_variable(long r, long name, long val, long vlen);
/**
* return the old value of r->count or error code (< 0)
*/
public native static long ngx_http_clojure_mem_inc_req_count(long r, long detal);
public native static void ngx_http_clojure_mem_continue_current_phase(long r, long rc);
public native static long ngx_http_clojure_mem_get_module_ctx_phase(long r);
public native static long ngx_http_clojure_mem_get_module_ctx_upgrade(long r);
public native static long ngx_http_clojure_mem_post_event(long e, Object data, long offset);
public native static long ngx_http_clojure_mem_broadcast_event(long e, Object data, long offset, long hasSelf);
public native static long ngx_http_clojure_mem_read_raw_pipe(long p, Object buf, long offset, long len);
/**
* @deprecated
*/
/**
 * Registers a close-only listener on a native request.
 * @deprecated kept as a backward-compatible adapter; it forwards only the close event
 * to {@code ngx_http_clojure_add_listener}. Prefer {@link #addListener(long, ChannelListener, Object, int)}.
 */
public static long ngx_http_cleanup_add(long r, final ChannelListener<Object> listener, Object data) {
    return ngx_http_clojure_add_listener(r, new ChannelCloseAdapter<Object>() {
        @Override
        public void onClose(Object data) throws IOException {
            listener.onClose(data);
        }
    }, data, 0);
}
private native static long ngx_http_clojure_add_listener(long r, @SuppressWarnings("rawtypes") ChannelListener listener, Object data, int replace);
/** Convenience overload: registers the listener using the request's native pointer. */
public static void addListener(NginxRequest r, @SuppressWarnings("rawtypes") ChannelListener listener, Object data, int replace) {
    addListener(r.nativeRequest(), listener, data, replace);
}
/**
 * Registers a channel listener on a native request.
 * @throws IllegalStateException when the native call returns non-zero, which per the
 *         error message indicates the native request has already been cleaned up.
 */
@SuppressWarnings("rawtypes")
public static void addListener(long r, ChannelListener listener, Object data, int replace) {
    if ( ngx_http_clojure_add_listener(r, listener, data, replace) != 0) {
        throw new IllegalStateException("invalid request which is cleaned!");
    }
}
/** Upgrades the request to a websocket; flag 1 = treat a non-websocket request as an error. */
public static long ngx_http_clojure_websocket_upgrade(long req) {
    return ngx_http_clojure_websocket_upgrade(req, 1);
}
/**
* flag can be either of
* 0 do nothing for non-websocket request
* 1 error for non-websocket request
*/
public native static long ngx_http_clojure_websocket_upgrade(long req, int flag);
/**
* flag can be either of or combined of
* 0
* NGX_HTTP_CLOJURE_EVENT_HANDLER_FLAG_READ 1
* NGX_HTTP_CLOJURE_EVENT_HANDLER_FLAG_WRITE 2
*/
public native static void ngx_http_hijack_turn_on_event_handler(long req, int flag);
public native static long ngx_http_hijack_read(long req, Object buf, long offset, long len);
public native static long ngx_http_hijack_write(long req, Object buf, long offset, long len);
/**
* flag can be either of {@link MiniConstants#NGX_CLOJURE_BUF_FLUSH_FLAG} {@link MiniConstants#NGX_CLOJURE_BUF_LAST_FLAG}
*/
public native static long ngx_http_hijack_send(long req, Object buf, long offset, long len, int flag);
/**
* flag can be either of {@link MiniConstants#NGX_CLOJURE_BUF_FLUSH_FLAG} {@link MiniConstants#NGX_CLOJURE_BUF_LAST_FLAG}
*/
public native static long ngx_http_hijack_send_header(long req, int flag);
public native static long ngx_http_hijack_send_header(long req, Object buf, long offset, long len, int flag);
public native static long ngx_http_hijack_send_chain(long req, long chain, int flag);
public native static void ngx_http_hijack_set_async_timeout(long req, long timeout);
// public native static long ngx_http_clojure_mem_get_body_tmp_file(long r);
private static AppEventListenerManager appEventListenerManager;
// //for default or coroutine mode
// private static ByteBuffer defaultByteBuffer;
// private static CharBuffer defaultCharBuffer;
//It was only for thread pool mode
//But now we unify temp bufferes for thread pool mode & default & coroutine mode because maybe user can invoke some api in their own thread
private final static ThreadLocal<ByteBuffer> threadLocalByteBuffers = new ThreadLocal<ByteBuffer>();
private final static ThreadLocal<CharBuffer> threadLocalCharBuffers = new ThreadLocal<CharBuffer>();
private final static ConcurrentLinkedQueue<HijackEvent> pooledEvents = new ConcurrentLinkedQueue<NginxClojureRT.HijackEvent>();
static {
    // Eager initialization kept here (not only in the native-driven init path) to be
    // friendly to lein-ring style testing where nginx's native side is absent.
    try {
        getLog();
        initUnsafe();
        appEventListenerManager = new AppEventListenerManager();
        // RuntimeMXBean name has the form "pid@host"; keep only the pid part.
        processId = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    } catch (Throwable e) {
        // Deliberate swallow: an exception thrown from a JNI-triggered class init would
        // surface as an obscure nginx error, so print instead of propagating.
        e.printStackTrace();
    }
}
/** Returns the global application event listener manager. */
public static AppEventListenerManager getAppEventListenerManager() {
    return appEventListenerManager;
}
/** Replaces the global application event listener manager. */
public static void setAppEventListenerManager(AppEventListenerManager appEventListenerManager) {
    NginxClojureRT.appEventListenerManager = appEventListenerManager;
}
/**
 * Renders a packed numeric version (major*1000000 + minor*1000 + patch)
 * as the dotted string "major.minor.patch", e.g. 1023000 -> "1.23.0".
 */
public static String formatVer(long ver) {
    long major = ver / 1000000;
    long minor = (ver / 1000) % 1000;
    long patch = ver % 1000;
    return major + "." + minor + "." + patch;
}
/**
 * Pairs a worker-produced response with its originating request. The native output
 * chain is built eagerly in the constructor (i.e. on the worker thread) so the
 * dispatcher only has to post an event for the nginx side to pick up.
 */
public static final class WorkerResponseContext {
    public final NginxResponse response;
    public final NginxRequest request;
    // Native chain pointer for the response body, or 0 when there is nothing to send.
    public long chain;
    public WorkerResponseContext(NginxResponse resp, NginxRequest req) {
        super();
        this.response = resp;
        this.request = req;
        if (resp.type() >= 0) {
            if (req.isReleased()) {
                chain = 0; // request already released: nothing can be written
            }else {
                chain = req.handler().buildOutputChain(resp);
            }
        }else {
            if (resp.type() == NginxResponse.TYPE_FAKE_BODY_FILTER_TAG) {
//				chain = req.handler().buildOutputChain(resp);
                chain = 0;
            }else {
                chain = 0; // other fake/tag responses carry no chain
            }
        }
    }
}
/**
 * Reusable event object that hands a hijacked-channel operation from an arbitrary
 * thread to the nginx side and lets the caller block on its result via a semaphore.
 * Instances are recycled through the {@code pooledEvents} queue.
 */
public static class HijackEvent {
    protected NginxHttpServerChannel channel;
    protected Object message; //maybe NginxResponse or for complex return value
    protected volatile long offset; //maybe chain of response or also as simple return value
    protected int len;
    protected int flag;
    protected Semaphore semaphore;
    public HijackEvent() {
        // Zero permits: awaitForFinish() blocks until complete(...) releases one.
        semaphore = new Semaphore(0);
    }
    /** Re-initializes a pooled instance for a buffer-based operation. */
    public HijackEvent reset(NginxHttpServerChannel channel, Object message, long off, int len, int flag) {
        this.channel = channel;
        this.message = message;
        this.offset = off;
        this.len = len;
        this.flag = flag;
        return this;
    }
    /** Re-initializes a pooled instance for posting a response with a prebuilt native chain. */
    public HijackEvent reset(NginxHttpServerChannel channel, NginxResponse response, long chain) {
        this.channel = channel;
        this.message = response;
        this.offset = chain;
        return this;
    }
    /** Waits up to {@code timeout} ms for completion; returns false on timeout. */
    public boolean awaitForFinish(long timeout) throws InterruptedException {
        return semaphore.tryAcquire(timeout, TimeUnit.MILLISECONDS);
    }
    /** Blocks until {@link #complete(long)} or {@link #complete(Object)} is invoked. */
    public void awaitForFinish() throws InterruptedException {
        semaphore.acquire();
    }
    /** Publishes a numeric result (stored in {@code offset}) and wakes the waiter. */
    public void complete(long v) {
        this.offset = v;
        semaphore.release();
    }
    /** Publishes an object result (stored in {@code message}) and wakes the waiter. */
    public void complete(Object v) {
        this.message = v;
        semaphore.release();
    }
    /** Clears references and leftover permits before the instance returns to the pool. */
    public void recycle() {
        channel = null;
        message = null;
        semaphore.drainPermits();
    }
}
/**
 * Dispatcher loop for thread-pool mode: takes completed worker results and posts a
 * native event so nginx's main thread can write the response out. Runs until the
 * dispatcher thread is interrupted (e.g. during shutdown).
 */
public static final class EventDispatherRunnable implements Runnable {
    final CompletionService<WorkerResponseContext> workers;
    public EventDispatherRunnable(final CompletionService<WorkerResponseContext> workers) {
        this.workers = workers;
    }
    @Override
    public void run() {
        while (true) {
            try {
                Future<WorkerResponseContext> respFuture = workers.take();
                WorkerResponseContext ctx = respFuture.get();
                // Fake async results and log-phase requests need no response posting.
                if (ctx.response.type() == NginxResponse.TYPE_FAKE_ASYNC_TAG
                        || ctx.request.phase() == NGX_HTTP_LOG_PHASE) {
                    continue;
                }
                long r = ctx.response.request().nativeRequest();
                savePostEventData(r, ctx);
                ngx_http_clojure_mem_post_event(r, null, 0);
            } catch (InterruptedException e) {
                // FIX: restore the interrupt status (previously swallowed) so that any
                // outer code observing this thread can still see the interruption.
                Thread.currentThread().interrupt();
                log.warn("jvm workers dispather has been interrupted!");
                break;
            } catch (ExecutionException e) {
                log.error("unexpected ExecutionException!", e);
            } catch (Throwable e) {
                log.error("unexpected Error!", e);
            }
        }
    }
}
/**
 * Stores the payload for a posted native event, keyed by the native id. If a previous
 * payload for the same id is still pending, spins (yielding the CPU) until the slot
 * frees up. Returns early — with the interrupt flag restored — if interrupted.
 */
public static void savePostEventData(long id, Object o) {
    for (;;) {
        if (POSTED_EVENTS_DATA.putIfAbsent(id, o) == null) {
            return; // slot was free; payload stored
        }
        try {
            Thread.sleep(0); // yield while the previous event for this id is consumed
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            log.warn("savePostEventData interrupted!");
            return;
        }
    }
}
/**
 * Selects the runtime mode from the configured worker count {@code n} and the java
 * agent state, then initializes the corresponding executors:
 *   - agent active and n == 0  -> coroutine mode (MODE_COROUTINE)
 *   - n < 0                    -> default blocking mode (no pools created)
 *   - n > 0                    -> thread-pool mode (MODE_THREAD) with n workers
 * Note: {@code n} may be rewritten below depending on the agent configuration.
 */
private static void initWorkers(int n) {
    if (JavaAgent.db != null) {
        if (JavaAgent.db.isDoNothing()) {
            coroutineEnabled = false;
            log.warn("java agent disabled so we turn off coroutine support!");
            if (n == 0) {
                n = -1; // coroutines unavailable: fall back to default blocking mode
            }
        }else if (JavaAgent.db.isRunTool()) {
            coroutineEnabled = false;
            log.warn("we just run for generatation of coroutine waving configuration NOT for general cases!!!");
            /*
             * Because sometimes we need to access services provide by the same nginx instance,
             * e.g. proxyed external http service, so when turn on run tool mode we need thread
             * pool to make worker not blocked otherwise we can not continue the process of generatation
             * of coroutine waving configuration.*/
            if (n < 0) {
                log.warn("enable thread pool mode for run tool mode so that %s",
                        "worker won't be blocked when access services provide by the same nginx instance");
                n = Runtime.getRuntime().availableProcessors() * 2;
            }
        } else {
            log.info("java agent configured so we turn on coroutine support!");
            if (JavaAgent.db.isEnableNativeCoroutine()) {
                Coroutine.prepareNative();
            }
            if (n > 0) {
                // An explicit jvm_workers value conflicts with coroutine mode; ignore it.
                log.warn("found jvm_workers = %d, and not = 0 we just ignored!", n);
            }
            n = 0;
        }
    }
    if (n == 0) {
        if (JavaAgent.db == null) {
            log.warn("java agent NOT configured so we turn off coroutine support!");
            coroutineEnabled = false;
        }else {
            coroutineEnabled = true;
            MODE = MODE_COROUTINE;
            try {
                // Route all java.net.Socket traffic through the coroutine-aware implementation.
                Socket.setSocketImplFactory(new NginxClojureSocketFactory());
            } catch (IOException e) {
                throw new RuntimeException("can not init NginxClojureSocketFactory!", e);
            }
        }
//		defaultByteBuffer = ByteBuffer.allocate(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE);
//		defaultCharBuffer = CharBuffer.allocate(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE);
        return;
    }
    if (n < 0) {
        // Default blocking mode: no worker pool, no dispatcher.
//		defaultByteBuffer = ByteBuffer.allocate(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE);
//		defaultCharBuffer = CharBuffer.allocate(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE);
        return;
    }
    log.info("nginx-clojure run on thread pool mode, coroutineEnabled=false");
    MODE = MODE_THREAD;
//	threadLocalByteBuffers = new ThreadLocal<ByteBuffer>();
//	threadLocalCharBuffers = new ThreadLocal<CharBuffer>();
    // Single dispatcher thread pulls completed worker results and posts native events.
    eventDispather = Executors.newSingleThreadExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "nginx-clojure-eventDispather");
        }
    });
    workers = new ExecutorCompletionService<WorkerResponseContext>(workerExecutorService = Executors.newFixedThreadPool(n, new ThreadFactory() {
        final AtomicLong counter = new AtomicLong(0);
        public Thread newThread(Runnable r) {
            return new Thread(r, "nginx-clojure-worker-" + counter.getAndIncrement());
        }
    }));
    eventDispather.submit(new EventDispatherRunnable(workers));
}
/**
 * Shuts down the worker pool, the event dispatcher and the testing pool, then clears
 * their references. (The method name keeps its historical misspelling because it is
 * part of the existing internal API.)
 */
private static void destoryWorkers() {
    if (workerExecutorService != null) {
        workerExecutorService.shutdown();
        try {
            // Allow in-flight worker tasks to finish before tearing everything down.
            workerExecutorService.awaitTermination(1000, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // FIX: restore the interrupt status (previously swallowed) so the shutdown
            // caller can still observe the interruption.
            Thread.currentThread().interrupt();
            getLog().error("shutdown workerExecutorService error", e);
        }
    }
    if (eventDispather != null) {
        eventDispather.shutdownNow();
    }
    if (threadPoolOnlyForTestingUsage != null) {
        threadPoolOnlyForTestingUsage.shutdownNow();
    }
    workerExecutorService = null;
    eventDispather = null;
    threadPoolOnlyForTestingUsage = null;
    workers = null;
}
/**
 * Lazily creates the shared thread pool used only for testing (e.g. lein-ring setups
 * without coroutine support). Synchronized so only a single pool is ever created.
 */
public static synchronized ExecutorService initThreadPoolOnlyForTestingUsage() {
    if (threadPoolOnlyForTestingUsage == null) {
        threadPoolOnlyForTestingUsage = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()+2, new ThreadFactory() {
            final AtomicLong counter = new AtomicLong(0);
            public Thread newThread(Runnable r) {
                return new Thread(r, "nginx-clojure-only4test-thread" + counter.getAndIncrement());
            }
        });
    }
    return threadPoolOnlyForTestingUsage;
}
/**
 * Builds a holder for a known single-valued header. A negative {@code offset} means this
 * nginx build does not expose the field, so fall back to generic unknown-header handling.
 */
private static NginxHeaderHolder safeBuildKnownTableEltHeaderHolder(String name, long offset, long headersOffset) {
    if (offset >= 0) {
        return new TableEltHeaderHolder(name, offset, headersOffset);
    }
    return new UnknownHeaderHolder(name, headersOffset);
}
/**
 * Builds a holder for a known multi-valued header. From nginx 1.23.0 on, a different
 * holder implementation ({@code EtlListHeaderHolder}) is required for these fields;
 * a negative {@code offset} again means the field is absent in this nginx build.
 */
private static NginxHeaderHolder safeBuildKnownArrayHeaderHolder(String name, long offset, long headersOffset) {
    if (offset >= 0) {
        if (NGINX_VER >= 1023000) {
            return new EtlListHeaderHolder(name, offset, headersOffset);
        }
        return new ArrayHeaderHolder(name, offset, headersOffset);
    }
    return new UnknownHeaderHolder(name, headersOffset);
}
/**
 * Fills {@code map} with (string value -> native address) entries by walking a native
 * array of ngx_str_t structures starting at {@code addr}, stepping by
 * {@code NGX_HTTP_CLOJURE_STR_SIZE} until {@code fetchNGXString} yields null (terminator).
 */
public static void initStringAddrMapsByNativeAddr(Map<String, Long> map, long addr) {
    while (true) {
        String var = fetchNGXString(addr, DEFAULT_ENCODING);
        if (var == null) {
            break; // end-of-array terminator reached
        }
        map.put(var, addr);
        addr += NGX_HTTP_CLOJURE_STR_SIZE;
    }
}
private static synchronized void initMemIndex(long idxpt) {
getLog();
initUnsafe();
if (log.isDebugEnabled()) {
log.debug("jvm classpath:\n " + System.getProperty("java.class.path"));
}
NGINX_MAIN_THREAD = Thread.currentThread();
BYTE_ARRAY_OFFSET = UNSAFE.arrayBaseOffset(byte[].class);
try {
STRING_CHAR_ARRAY_OFFSET = UNSAFE.objectFieldOffset(String.class.getDeclaredField("value"));
} catch (Throwable e) { // never happen!
UNSAFE.throwException(e);
}
long[] index = new long[NGX_HTTP_CLOJURE_MEM_IDX_END + 1];
for (int i = 0; i < NGX_HTTP_CLOJURE_MEM_IDX_END + 1; i++) {
index[i] = UNSAFE.getLong(idxpt + i * 8);
}
MEM_INDEX = index;
NGX_HTTP_CLOJURE_UINT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_UINT_SIZE_IDX];
NGX_HTTP_CLOJURE_PTR_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_PTR_SIZE_IDX];
NGX_HTTP_CLOJURE_STR_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_STR_SIZE_IDX];
NGX_HTTP_CLOJURE_STR_LEN_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_STR_LEN_IDX];
NGX_HTTP_CLOJURE_STR_DATA_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_STR_DATA_IDX];
NGX_HTTP_CLOJURE_SIZET_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_SIZET_SIZE_IDX];
NGX_HTTP_CLOJURE_OFFT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_OFFT_SIZE_IDX];
NGX_HTTP_CLOJURE_BUFFER_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_BUFFER_SIZE_IDX];
NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE = (int) NGX_HTTP_CLOJURE_BUFFER_SIZE;
NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_LINE_SIZE = Math.max(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE/2, NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE-1024);
NGX_HTTP_CLOJURE_TELT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_TELT_SIZE_IDX];
NGX_HTTP_CLOJURE_TEL_HASH_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_TEL_HASH_IDX];
NGX_HTTP_CLOJURE_TEL_KEY_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_TEL_KEY_IDX];
NGX_HTTP_CLOJURE_TEL_VALUE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_TEL_VALUE_IDX];
NGX_HTTP_CLOJURE_TEL_LOWCASE_KEY_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_TEL_LOWCASE_KEY_IDX];
NGX_HTTP_CLOJURE_TEL_NEXT_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_TEL_NEXT_IDX];
NGX_HTTP_CLOJURE_REQT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_REQT_SIZE_IDX];
NGX_HTTP_CLOJURE_REQ_METHOD_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_REQ_METHOD_IDX];
NGX_HTTP_CLOJURE_REQ_URI_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_REQ_URI_IDX];
NGX_HTTP_CLOJURE_REQ_ARGS_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_REQ_ARGS_IDX];
NGX_HTTP_CLOJURE_REQ_HEADERS_IN_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_REQ_HEADERS_IN_IDX];
NGX_HTTP_CLOJURE_REQ_POOL_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_REQ_POOL_IDX];
NGX_HTTP_CLOJURE_REQ_HEADERS_OUT_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_REQ_HEADERS_OUT_IDX];
NGX_HTTP_CLOJURE_CHAINT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_CHAINT_SIZE_IDX];
NGX_HTTP_CLOJURE_CHAIN_BUF_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_CHAIN_BUF_IDX];
NGX_HTTP_CLOJURE_CHAIN_NEXT_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_CHAIN_NEXT_IDX];
NGX_HTTP_CLOJURE_VARIABLET_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_VARIABLET_SIZE_IDX];
NGX_HTTP_CLOJURE_CORE_VARIABLES_ADDR = MEM_INDEX[NGX_HTTP_CLOJURE_CORE_VARIABLES_ADDR_IDX];
NGX_HTTP_CLOJURE_HEADERS_NAMES_ADDR = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERS_NAMES_ADDR_IDX];
NGX_HTTP_CLOJURE_ARRAYT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_ARRAYT_SIZE_IDX];
NGX_HTTP_CLOJURE_ARRAY_ELTS_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_ARRAY_ELTS_IDX];
NGX_HTTP_CLOJURE_ARRAY_NELTS_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_ARRAY_NELTS_IDX];
NGX_HTTP_CLOJURE_ARRAY_SIZE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_ARRAY_SIZE_IDX];
NGX_HTTP_CLOJURE_ARRAY_NALLOC_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_ARRAY_NALLOC_IDX];
NGX_HTTP_CLOJURE_ARRAY_POOL_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_ARRAY_POOL_IDX];
NGX_HTTP_CLOJURE_KEYVALT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_KEYVALT_SIZE_IDX];
NGX_HTTP_CLOJURE_KEYVALT_KEY_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_KEYVALT_KEY_IDX];
NGX_HTTP_CLOJURE_KEYVALT_VALUE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_KEYVALT_VALUE_IDX];
NGX_HTTP_CLOJURE_MIME_TYPES_ADDR = MEM_INDEX[NGX_HTTP_CLOJURE_MIME_TYPES_ADDR_IDX];
NGX_HTTP_CLOJURE_HEADERSIT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSIT_SIZE_IDX];
NGX_HTTP_CLOJURE_HEADERSI_HOST_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_HOST_IDX];
NGX_HTTP_CLOJURE_HEADERSI_CONNECTION_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_CONNECTION_IDX];
NGX_HTTP_CLOJURE_HEADERSI_IF_MODIFIED_SINCE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_IF_MODIFIED_SINCE_IDX];
NGX_HTTP_CLOJURE_HEADERSI_IF_UNMODIFIED_SINCE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_IF_UNMODIFIED_SINCE_IDX];
NGX_HTTP_CLOJURE_HEADERSI_USER_AGENT_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_USER_AGENT_IDX];
NGX_HTTP_CLOJURE_HEADERSI_REFERER_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_REFERER_IDX];
NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_IDX];
NGX_HTTP_CLOJURE_HEADERSI_CONTENT_TYPE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_CONTENT_TYPE_IDX];
NGX_HTTP_CLOJURE_HEADERSI_RANGE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_RANGE_IDX];
NGX_HTTP_CLOJURE_HEADERSI_IF_RANGE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_IF_RANGE_IDX];
NGX_HTTP_CLOJURE_HEADERSI_TRANSFER_ENCODING_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_TRANSFER_ENCODING_IDX];
NGX_HTTP_CLOJURE_HEADERSI_EXPECT_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_EXPECT_IDX];
//#if (NGX_HTTP_GZIP)
NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_ENCODING_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_ENCODING_IDX];
NGX_HTTP_CLOJURE_HEADERSI_VIA_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_VIA_IDX];
//#endif
NGX_HTTP_CLOJURE_HEADERSI_AUTHORIZATION_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_AUTHORIZATION_IDX];
NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_IDX];
//#if (NGX_HTTP_PROXY || NGX_HTTP_REALIP || NGX_HTTP_GEO)
NGX_HTTP_CLOJURE_HEADERSI_X_FORWARDED_FOR_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_X_FORWARDED_FOR_IDX];
//#endif
//#if (NGX_HTTP_REALIP)
NGX_HTTP_CLOJURE_HEADERSI_X_REAL_IP_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_X_REAL_IP_IDX];
//#endif
//#if (NGX_HTTP_HEADERS)
NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_IDX];
NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_LANGUAGE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_LANGUAGE_IDX];
//#endif
//#if (NGX_HTTP_DAV)
NGX_HTTP_CLOJURE_HEADERSI_DEPTH_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_DEPTH_IDX];
NGX_HTTP_CLOJURE_HEADERSI_DESTINATION_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_DESTINATION_IDX];
NGX_HTTP_CLOJURE_HEADERSI_OVERWRITE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_OVERWRITE_IDX];
NGX_HTTP_CLOJURE_HEADERSI_DATE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_DATE_IDX];
//#endif
NGX_HTTP_CLOJURE_HEADERSI_USER_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_USER_IDX];
NGX_HTTP_CLOJURE_HEADERSI_PASSWD_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_PASSWD_IDX];
NGX_HTTP_CLOJURE_HEADERSI_COOKIE_OFFSET =MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_COOKIE_IDX];
NGX_HTTP_CLOJURE_HEADERSI_SERVER_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_SERVER_IDX];
NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_N_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_N_IDX];
NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_N_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_N_IDX];
NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSI_HEADERS_IDX];
/*index for size of ngx_http_headers_out_t */
NGX_HTTP_CLOJURE_HEADERSOT_SIZE = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSOT_SIZE_IDX];
/*field offset index for ngx_http_headers_out_t*/
NGX_HTTP_CLOJURE_HEADERSO_STATUS_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_STATUS_IDX];
NGX_HTTP_CLOJURE_HEADERSO_STATUS_LINE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_STATUS_LINE_IDX];
NGX_HTTP_CLOJURE_HEADERSO_SERVER_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_SERVER_IDX];
NGX_HTTP_CLOJURE_HEADERSO_DATE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_DATE_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CONTENT_ENCODING_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CONTENT_ENCODING_IDX];
NGX_HTTP_CLOJURE_HEADERSO_LOCATION_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_LOCATION_IDX];
NGX_HTTP_CLOJURE_HEADERSO_REFRESH_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_REFRESH_IDX];
NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CONTENT_RANGE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CONTENT_RANGE_IDX];
NGX_HTTP_CLOJURE_HEADERSO_ACCEPT_RANGES_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_ACCEPT_RANGES_IDX];
NGX_HTTP_CLOJURE_HEADERSO_WWW_AUTHENTICATE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_WWW_AUTHENTICATE_IDX];
NGX_HTTP_CLOJURE_HEADERSO_EXPIRES_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_EXPIRES_IDX];
NGX_HTTP_CLOJURE_HEADERSO_ETAG_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_ETAG_IDX];
NGX_HTTP_CLOJURE_HEADERSO_OVERRIDE_CHARSET_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_OVERRIDE_CHARSET_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_LEN_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_LEN_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CHARSET_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CHARSET_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_LOWCASE_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_LOWCASE_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_HASH_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CONTENT_TYPE_HASH_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CACHE_CONTROL_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CACHE_CONTROL_IDX];
NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_N_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_N_IDX];
NGX_HTTP_CLOJURE_HEADERSO_DATE_TIME_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_DATE_TIME_IDX];
NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_TIME_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_TIME_IDX];
NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET = MEM_INDEX[NGX_HTTP_CLOJURE_HEADERSO_HEADERS_IDX];
// NGINX_CLOJURE_MODULE_CTX_PHRASE_ID_OFFSET = MEM_INDEX[NGINX_CLOJURE_MODULE_CTX_PHRASE_ID];
NGX_WORKER_PROCESSORS_NUM = MEM_INDEX[NGX_WORKER_PROCESSORS_NUM_ID];
NGINX_CLOJURE_RT_WORKERS = MEM_INDEX[NGINX_CLOJURE_RT_WORKERS_ID];
NGINX_CLOJURE_VER = MEM_INDEX[NGINX_CLOJURE_VER_ID];
NGINX_VER = MEM_INDEX[NGINX_VER_ID];
//now we not use final static to keep it from optimizing to constant integer
if (NGINX_CLOJURE_RT_REQUIRED_LVER > NGINX_CLOJURE_VER) {
throw new IllegalStateException("NginxClojureRT required version is >=" + formatVer(NGINX_CLOJURE_RT_REQUIRED_LVER) + ", but here is " + formatVer(NGINX_CLOJURE_VER));
}
NGINX_CLOJURE_FULL_VER = "nginx-clojure/" + formatVer(NGINX_VER) + "-" + formatVer(NGINX_CLOJURE_RT_VER);
KNOWN_REQ_HEADERS.put("Host", safeBuildKnownTableEltHeaderHolder("Host", NGX_HTTP_CLOJURE_HEADERSI_HOST_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Connection", safeBuildKnownTableEltHeaderHolder("Connection", NGX_HTTP_CLOJURE_HEADERSI_CONNECTION_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("If-Modified-Since",safeBuildKnownTableEltHeaderHolder("If-Modified-Since", NGX_HTTP_CLOJURE_HEADERSI_IF_MODIFIED_SINCE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("If-Unmodified-Since", safeBuildKnownTableEltHeaderHolder("If-Unmodified-Since", NGX_HTTP_CLOJURE_HEADERSI_IF_UNMODIFIED_SINCE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("User-Agent", safeBuildKnownTableEltHeaderHolder("User-Agent", NGX_HTTP_CLOJURE_HEADERSI_USER_AGENT_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Referer", safeBuildKnownTableEltHeaderHolder("Referer", NGX_HTTP_CLOJURE_HEADERSI_REFERER_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Content-Length", new OffsetHeaderHolder("Content-Length", NGX_HTTP_CLOJURE_HEADERSI_CONTENT_LENGTH_N_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Content-Type", safeBuildKnownTableEltHeaderHolder("Content-Type", NGX_HTTP_CLOJURE_HEADERSI_CONTENT_TYPE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Range", safeBuildKnownTableEltHeaderHolder("Range", NGX_HTTP_CLOJURE_HEADERSI_RANGE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("If-Range", safeBuildKnownTableEltHeaderHolder("If-Range", NGX_HTTP_CLOJURE_HEADERSI_IF_RANGE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Transfer-Encoding", safeBuildKnownTableEltHeaderHolder("Transfer-Encoding", NGX_HTTP_CLOJURE_HEADERSI_TRANSFER_ENCODING_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Expect", safeBuildKnownTableEltHeaderHolder("Expect", NGX_HTTP_CLOJURE_HEADERSI_EXPECT_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Accept-Encoding", safeBuildKnownTableEltHeaderHolder("Accept-Encoding", NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_ENCODING_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Via", safeBuildKnownTableEltHeaderHolder("Via", NGX_HTTP_CLOJURE_HEADERSI_VIA_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Authorization", safeBuildKnownTableEltHeaderHolder("Authorization", NGX_HTTP_CLOJURE_HEADERSI_AUTHORIZATION_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Keep-Alive", safeBuildKnownTableEltHeaderHolder("Keep-Alive", NGX_HTTP_CLOJURE_HEADERSI_KEEP_ALIVE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("X-Forwarded-For", safeBuildKnownArrayHeaderHolder("X-Forwarded-For", NGX_HTTP_CLOJURE_HEADERSI_X_FORWARDED_FOR_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("X-Real-Ip", safeBuildKnownTableEltHeaderHolder("X-Real-Ip", NGX_HTTP_CLOJURE_HEADERSI_X_REAL_IP_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Accept", safeBuildKnownTableEltHeaderHolder("Accept", NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Accept-Language", safeBuildKnownTableEltHeaderHolder("Accept-Language", NGX_HTTP_CLOJURE_HEADERSI_ACCEPT_LANGUAGE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Depth", safeBuildKnownTableEltHeaderHolder("Depth", NGX_HTTP_CLOJURE_HEADERSI_DEPTH_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Destination", safeBuildKnownTableEltHeaderHolder("Destination", NGX_HTTP_CLOJURE_HEADERSI_DESTINATION_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Overwrite", safeBuildKnownTableEltHeaderHolder("Overwrite", NGX_HTTP_CLOJURE_HEADERSI_OVERWRITE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Date", safeBuildKnownTableEltHeaderHolder("Date", NGX_HTTP_CLOJURE_HEADERSI_DATE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
KNOWN_REQ_HEADERS.put("Cookie", safeBuildKnownArrayHeaderHolder("Cookie", NGX_HTTP_CLOJURE_HEADERSI_COOKIE_OFFSET, NGX_HTTP_CLOJURE_HEADERSI_HEADERS_OFFSET));
/*temp setting only for CORE_VARS initialization*/
// defaultByteBuffer = ByteBuffer.allocate(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE);
// defaultCharBuffer = CharBuffer.allocate(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE);
initStringAddrMapsByNativeAddr(CORE_VARS, NGX_HTTP_CLOJURE_CORE_VARIABLES_ADDR);
initStringAddrMapsByNativeAddr(HEADERS_NAMES, NGX_HTTP_CLOJURE_HEADERS_NAMES_ADDR);
initStringAddrMapsByNativeAddr(MIME_TYPES, NGX_HTTP_CLOJURE_MIME_TYPES_ADDR);
SERVER_PORT_FETCHER = new RequestKnownNameVarFetcher("server_port");
SERVER_NAME_FETCHER = new RequestKnownNameVarFetcher("server_name");
REMOTE_ADDR_FETCHER = new RequestKnownNameVarFetcher("remote_addr");
URI_FETCHER = new RequestKnownOffsetVarFetcher(NGX_HTTP_CLOJURE_REQ_URI_OFFSET);
QUERY_STRING_FETCHER = new RequestKnownOffsetVarFetcher(NGX_HTTP_CLOJURE_REQ_ARGS_OFFSET);
SCHEME_FETCHER = new RequestKnownNameVarFetcher("scheme");
REQUEST_METHOD_FETCHER = new RequestMethodStrFetcher();
CONTENT_TYPE_FETCHER = new RequestKnownHeaderFetcher("Content-Type");
CHARACTER_ENCODING_FETCHER = new RequestCharacterEncodingFetcher();
// HEADER_FETCHER = new RequestHeadersFetcher();
BODY_FETCHER = new RequestBodyFetcher();
KNOWN_RESP_HEADERS.put("Server", safeBuildKnownTableEltHeaderHolder("Server", NGX_HTTP_CLOJURE_HEADERSO_SERVER_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Date", safeBuildKnownTableEltHeaderHolder("Date", NGX_HTTP_CLOJURE_HEADERSO_DATE_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Content-Encoding", safeBuildKnownTableEltHeaderHolder("Content-Encoding", NGX_HTTP_CLOJURE_HEADERSO_CONTENT_ENCODING_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Location", safeBuildKnownTableEltHeaderHolder("Location", NGX_HTTP_CLOJURE_HEADERSO_LOCATION_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Refresh", safeBuildKnownTableEltHeaderHolder("Refresh", NGX_HTTP_CLOJURE_HEADERSO_REFRESH_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Last-Modified", safeBuildKnownTableEltHeaderHolder("Last-Modified", NGX_HTTP_CLOJURE_HEADERSO_LAST_MODIFIED_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Content-Range", safeBuildKnownTableEltHeaderHolder("Content-Range", NGX_HTTP_CLOJURE_HEADERSO_CONTENT_RANGE_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Accept-Ranges", safeBuildKnownTableEltHeaderHolder("Accept-Ranges", NGX_HTTP_CLOJURE_HEADERSO_ACCEPT_RANGES_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("WWW-Authenticate", safeBuildKnownTableEltHeaderHolder("WWW-Authenticate", NGX_HTTP_CLOJURE_HEADERSO_WWW_AUTHENTICATE_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Expires", safeBuildKnownTableEltHeaderHolder("Expires", NGX_HTTP_CLOJURE_HEADERSO_EXPIRES_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Etag", safeBuildKnownTableEltHeaderHolder("Etag", NGX_HTTP_CLOJURE_HEADERSO_ETAG_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Cache-Control", safeBuildKnownArrayHeaderHolder("Cache-Control", NGX_HTTP_CLOJURE_HEADERSO_CACHE_CONTROL_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET));
KNOWN_RESP_HEADERS.put("Content-Type", RESP_CONTENT_TYPE_HOLDER = new ResponseContentTypeHolder());
KNOWN_RESP_HEADERS.put("Content-Length", new OffsetHeaderHolder("Content-Length", NGX_HTTP_CLOJURE_HEADERSO_CONTENT_LENGTH_N_OFFSET, NGX_HTTP_CLOJURE_HEADERSO_HEADERS_OFFSET) );
/*clear all to let initWorkers initializing them correctly*/
// defaultByteBuffer = null;
// defaultCharBuffer = null;
initWorkers((int)NGINX_CLOJURE_RT_WORKERS);
//set system properties for build-in nginx handler factories
System.setProperty(NginxHandlerFactory.NGINX_CLOJURE_HANDLER_FACTORY_SYSTEM_PROPERTY_PREFIX + "java", "nginx.clojure.java.NginxJavaHandlerFactory");
System.setProperty(NginxHandlerFactory.NGINX_CLOJURE_HANDLER_FACTORY_SYSTEM_PROPERTY_PREFIX + "clojure", "nginx.clojure.clj.NginxClojureHandlerFactory");
System.setProperty(NginxHandlerFactory.NGINX_CLOJURE_HANDLER_FACTORY_SYSTEM_PROPERTY_PREFIX + "groovy", "nginx.clojure.groovy.NginxGroovyHandlerFactory");
}
/**
 * Tears down the worker thread pool and drops the native memory index table.
 * After this call the native-offset based helpers in this class are unusable until
 * the index is initialized again.
 * Note: "destory" is a long-standing typo kept because callers use this name.
 */
private static synchronized void destoryMemIndex() {
destoryWorkers();
MEM_INDEX = null;
}
/**
 * Lazily binds the shared {@code Unsafe} instance from {@code HackUtils}.
 * Idempotent: only the first call has any effect.
 */
public static void initUnsafe() {
    if (UNSAFE == null) {
        UNSAFE = HackUtils.UNSAFE;
    }
}
/**
 * Substitutes every {@code #{key}} expression in {@code v}.  A key is resolved first
 * against {@code vars}, then against {@code vars} with a {@code "system."} prefix,
 * and finally against the JVM system properties.  An unresolved key renders as the
 * literal text {@code "null"}; an unterminated {@code #{...} } is copied through
 * verbatim (same behavior as before this rewrite).
 * <p>DO NOT use this method for frequent invoking because it is slow and not optimized.</p>
 *
 * @param v    template string, possibly containing {@code #{key}} expressions
 * @param vars lookup table for substitutions
 * @return the expanded string, or {@code v} itself when it holds no expression
 */
public static String evalSimpleExp(String v, Map<String, String> vars) {
    int exprStart = v.indexOf("#{");
    if (exprStart < 0) {
        return v;
    }
    StringBuilder result = new StringBuilder();
    int cursor = 0;
    while (exprStart > -1) {
        if (exprStart != cursor) {
            // copy the plain text between the previous expression and this one
            result.append(v, cursor, exprStart);
        }
        cursor = v.indexOf('}', exprStart);
        if (cursor < 0) {
            // unterminated expression: emit the rest verbatim and stop
            result.append(v.substring(exprStart));
            break;
        }
        String key = v.substring(exprStart + 2, cursor);
        String value = vars.get(key);
        if (value == null) {
            value = vars.get("system." + key);
        }
        if (value == null) {
            value = System.getProperty(key);
        }
        result.append(value); // note: a still-null value appends the text "null"
        cursor++;
        exprStart = v.indexOf("#{", cursor);
    }
    if (exprStart < 0 && cursor != v.length()) {
        result.append(v.substring(cursor)); // trailing plain text
    }
    return result.toString();
}
/**
 * Registers a handler built from native strings (type / name / code) and returns its
 * index in {@code HANDLERS}.  Optional key/value properties pointed to by {@code pros}
 * (an ngx_array_t of ngx_keyval_t) are decoded, expanded via
 * {@link #evalSimpleExp(String, Map)} and applied when the handler is
 * {@link Configurable}.  When coroutines are enabled the configuration step runs
 * inside a fresh coroutine so that blocking handler setup can yield.
 *
 * @param phase    nginx phase the handler is registered for
 * @param typeNStr native ngx_str_t address of the handler type
 * @param nameNStr native ngx_str_t address of the handler name
 * @param codeNStr native ngx_str_t address of the inline code
 * @param pros     native ngx_array_t address of properties, 0 when absent
 * @return the id (index) of the newly registered handler
 */
public static synchronized int registerCode(int phase, long typeNStr, long nameNStr, long codeNStr, long pros) {
    final String type = fetchNGXString(typeNStr, DEFAULT_ENCODING);
    final String name = fetchNGXString(nameNStr, DEFAULT_ENCODING);
    final String code = fetchNGXString(codeNStr, DEFAULT_ENCODING);
    final NginxHandler handler = NginxHandlerFactory.fetchHandler(phase, type, name, code);
    HANDLERS.add(handler);
    Runnable configTask = new Runnable() {
        public void run() {
            if (pros == 0) {
                return;
            }
            Map<String, String> properties = new ArrayMap<String, String>();
            int count = fetchNGXInt(pros + NGX_HTTP_CLOJURE_ARRAY_NELTS_OFFSET);
            long elts = UNSAFE.getAddress(pros + NGX_HTTP_CLOJURE_ARRAY_ELTS_OFFSET);
            for (int i = 0; i < count; i++) {
                long kv = elts + i * NGX_HTTP_CLOJURE_KEYVALT_SIZE;
                String key = fetchNGXString(kv + NGX_HTTP_CLOJURE_KEYVALT_KEY_OFFSET, DEFAULT_ENCODING);
                String value = fetchNGXString(kv + NGX_HTTP_CLOJURE_KEYVALT_VALUE_OFFSET, DEFAULT_ENCODING);
                properties.put(key, value);
            }
            // expand #{...} expressions in every value before handing them over
            for (Entry<String, String> en : properties.entrySet()) {
                en.setValue(evalSimpleExp(en.getValue(), properties));
            }
            if (handler instanceof Configurable) {
                ((Configurable) handler).config(properties);
            } else {
                log.warn("%s is not an instance of nginx.clojure.Configurable, so properties will be ignored!",
                        handler.getClass());
            }
        }
    };
    if (coroutineEnabled) {
        new Coroutine(configTask).resume();
    } else {
        configTask.run();
    }
    return HANDLERS.size() - 1;
}
/**
 * Converts a native {@code ngx_str_t} located at {@code address} into a Java string.
 *
 * @param address  native address of the ngx_str_t, 0 yields {@code null}
 * @param encoding charset used to decode the bytes
 * @return the decoded string, or {@code null} for a null pointer or non-positive length
 */
public static final String fetchNGXString(long address, Charset encoding) {
    if (address == 0) {
        return null;
    }
    int len = fetchNGXInt(address + NGX_HTTP_CLOJURE_STR_LEN_OFFSET);
    return len <= 0 ? null : fetchString(address + NGX_HTTP_CLOJURE_STR_DATA_OFFSET, len, encoding);
}
/**
 * Converts a native {@code ngx_str_t} at {@code address} into a Java string using the
 * caller-supplied working buffers instead of the thread-local scratch buffers.
 *
 * @param address  native address of the ngx_str_t, 0 yields {@code null}
 * @param encoding charset used to decode the bytes
 * @param bb       scratch byte buffer for the native copy
 * @param cb       scratch char buffer for the decoder
 * @return the decoded string, or {@code null} for a null pointer or non-positive length
 */
public static final String fetchNGXString(long address, Charset encoding, ByteBuffer bb, CharBuffer cb) {
    if (address == 0) {
        return null;
    }
    int len = fetchNGXInt(address + NGX_HTTP_CLOJURE_STR_LEN_OFFSET);
    if (len > 0) {
        return fetchString(address + NGX_HTTP_CLOJURE_STR_DATA_OFFSET, len, encoding, bb, cb);
    }
    return null;
}
/**
 * Writes {@code val} into a native {@code ngx_str_t} at {@code address}, allocating the
 * byte storage from {@code pool}.  A {@code null} value produces a null data pointer
 * and zero length.
 *
 * @return number of encoded bytes written (0 for {@code null})
 */
public static final int pushNGXString(long address, String val, Charset encoding, long pool){
    long lenAddr = address + NGX_HTTP_CLOJURE_STR_LEN_OFFSET;
    long dataAddr = address + NGX_HTTP_CLOJURE_STR_DATA_OFFSET;
    if (val != null) {
        int len = pushString(dataAddr, val, encoding, pool);
        pushNGXInt(lenAddr, len);
        return len;
    }
    UNSAFE.putAddress(dataAddr, 0);
    pushNGXInt(lenAddr, 0);
    return 0;
}
/**
 * Writes the lower-cased encoding of {@code val} into a native {@code ngx_str_t}
 * at {@code address}, allocating the byte storage from {@code pool}.
 *
 * @return number of encoded bytes written
 */
public static final int pushNGXLowcaseString(long address, String val, Charset encoding, long pool){
    int len = pushLowcaseString(address + NGX_HTTP_CLOJURE_STR_DATA_OFFSET, val, encoding, pool);
    pushNGXInt(address + NGX_HTTP_CLOJURE_STR_LEN_OFFSET, len);
    return len;
}
/**
 * Reads a native {@code ngx_uint_t}/{@code ngx_int_t} sized integer, truncating to
 * 32 bits on 64-bit builds.
 */
public static final int fetchNGXInt(long address){
    if (NGX_HTTP_CLOJURE_UINT_SIZE == 4) {
        return UNSAFE.getInt(address);
    }
    return (int) UNSAFE.getLong(address);
}
/**
 * Writes {@code val} as a native {@code ngx_uint_t}/{@code ngx_int_t} sized integer
 * (sign-extended to 64 bits when the native word is 8 bytes).
 */
public static final void pushNGXInt(long address, int val){
    if (NGX_HTTP_CLOJURE_UINT_SIZE != 4){
        UNSAFE.putLong(address, val);
    } else {
        UNSAFE.putInt(address, val);
    }
}
/**
 * Reads a native {@code off_t} sized value (sign-extended to long on 32-bit builds).
 */
public static final long fetchNGXOfft(long address){
    if (NGX_HTTP_CLOJURE_OFFT_SIZE == 4) {
        return UNSAFE.getInt(address);
    }
    return UNSAFE.getLong(address);
}
/**
 * Writes {@code val} as a native {@code off_t} sized value (truncated to 32 bits
 * on 32-bit builds).
 */
public static final void pushNGXOfft(long address, long val){
    if (NGX_HTTP_CLOJURE_OFFT_SIZE != 4){
        UNSAFE.putLong(address, val);
    } else {
        UNSAFE.putInt(address, (int) val);
    }
}
/**
 * Writes {@code val} as a native {@code size_t} sized value.
 */
public static final void pushNGXSizet(long address, int val){
    if (NGX_HTTP_CLOJURE_SIZET_SIZE != 4){
        UNSAFE.putLong(address, val);
    } else {
        UNSAFE.putInt(address, val);
    }
}
/**
 * Decodes {@code size} bytes located directly AT {@code address} (no pointer
 * dereference -- unlike {@code fetchString(long, int, Charset)}, which dereferences
 * a {@code u_char*} slot first) using the default encoding.
 * NOTE(review): assumes the thread-local buffer returned by pickByteBuffer() has
 * position 0 before bb.limit(size) is applied -- confirm against pickByteBuffer.
 */
public final static String fetchDString(long address, int size) {
ByteBuffer bb = pickByteBuffer();
CharBuffer cb = pickCharBuffer();
if (size > bb.capacity()) {
// oversized value: fall back to a one-off heap buffer
bb = ByteBuffer.allocate(size);
}
ngx_http_clojure_mem_copy_to_obj(address, bb.array(), BYTE_ARRAY_OFFSET, size);
bb.limit(size);
return HackUtils.decode(bb, DEFAULT_ENCODING, cb);
}
/**
 * Decodes {@code size} bytes from the {@code u_char*} stored at {@code paddress}
 * using the default encoding.
 */
public final static String fetchString(long paddress, int size) {
return fetchString(paddress, size, DEFAULT_ENCODING);
}
/**
 * Decodes {@code size} bytes from the native pointer stored at {@code paddress} into a
 * string using the caller-supplied working buffers.
 * The read length is silently clamped to {@code bb.limit()}, so at most one buffer's
 * worth of data is fetched.
 *
 * @param paddress address of a {@code u_char*} slot (the pointer is dereferenced here)
 * @param size     number of bytes to read, clamped to the buffer limit
 * @param encoding charset used to decode the bytes
 * @param bb       scratch byte buffer backing the native copy
 * @param cb       scratch char buffer used by the decoder
 */
public static final String fetchString(long paddress, int size, Charset encoding, ByteBuffer bb, CharBuffer cb) {
    if (size > bb.limit()) {
        size = bb.limit();
    }
    // fix: removed a leftover debug probe that printed "too long value??" to stderr
    // whenever size happened to equal 7168
    ngx_http_clojure_mem_copy_to_obj(UNSAFE.getAddress(paddress), bb.array(), BYTE_ARRAY_OFFSET, size);
    bb.limit(size);
    return HackUtils.decode(bb, encoding, cb);
}
/**
 * Decodes {@code size} bytes from the native pointer stored at {@code paddress}
 * using the thread-local scratch buffers, falling back to a one-off heap buffer
 * for oversized values.
 */
public static final String fetchString(long paddress, int size, Charset encoding) {
    ByteBuffer buf = pickByteBuffer();
    CharBuffer chars = pickCharBuffer();
    if (size > buf.capacity()) {
        buf = ByteBuffer.allocate(size);
    }
    ngx_http_clojure_mem_copy_to_obj(UNSAFE.getAddress(paddress), buf.array(), BYTE_ARRAY_OFFSET, size);
    buf.limit(size);
    return HackUtils.decode(buf, encoding, chars);
}
/**
 * Decodes up to {@code size} bytes starting at {@code *paddress + off}, keeping only
 * the longest valid (completely decodable) prefix and advancing {@code bb}'s position
 * by the number of bytes left undecoded so the caller can continue from there.
 * When {@code size} exceeds the room left in {@code bb}, a temporary heap buffer is
 * used instead and {@code bb}'s position is adjusted to mirror the leftover bytes.
 * NOTE(review): in the in-place branch below, bb.limit(size) ignores bb's current
 * position -- looks intentional given how callers manage the buffer, but confirm.
 */
public static final String fetchStringValidPart(long paddress, int off, int size, Charset encoding, ByteBuffer bb, CharBuffer cb) {
ByteBuffer lb = null;
if (size > bb.remaining()) {
// too big for the shared buffer: copy into a one-off buffer and decode from there
lb = ByteBuffer.allocate(size);
ngx_http_clojure_mem_copy_to_obj(UNSAFE.getAddress(paddress) + off, lb.array(), BYTE_ARRAY_OFFSET, size);
lb.limit(size);
cb = HackUtils.decodeValid(lb, encoding, cb);
// propagate how many bytes were NOT consumed back into bb's position
if (lb.remaining() == 0) {
bb.position(bb.limit());
}else if (lb.remaining() < bb.remaining()){
bb.position(bb.position() + lb.remaining());
}
return cb.toString();
}
ngx_http_clojure_mem_copy_to_obj(UNSAFE.getAddress(paddress) + off, bb.array(), bb.arrayOffset() + bb.position() + BYTE_ARRAY_OFFSET, size);
bb.limit(size);
cb = HackUtils.decodeValid(bb, encoding, cb);
return cb.toString();
}
/**
 * Encodes the lower-cased form of {@code val}, allocates storage from the nginx
 * {@code pool}, copies the bytes there and stores the new address into the
 * {@code u_char*} slot at {@code paddress}.
 *
 * @return number of bytes written
 * @throws OutOfMemoryError when the nginx pool cannot satisfy the allocation
 */
public static final int pushLowcaseString(long paddress, String val, Charset encoding, long pool) {
    ByteBuffer bb = pickByteBuffer();
    bb = HackUtils.encodeLowcase(val, encoding, bb);
    int len = bb.remaining();
    long strAddr = ngx_palloc(pool, len);
    // fix: guard against ngx_palloc returning NULL -- copying to address 0 would
    // crash the worker; mirrors the check in unsafeSetNginxVariable
    if (strAddr == 0) {
        throw new OutOfMemoryError("nginx OutOfMemoryError");
    }
    UNSAFE.putAddress(paddress, strAddr);
    ngx_http_clojure_mem_copy_to_addr(bb.array(), BYTE_ARRAY_OFFSET, strAddr, len);
    return len;
}
/**
 * Encodes {@code val}, allocates storage from the nginx {@code pool}, copies the
 * bytes there and stores the new address into the {@code u_char*} slot at
 * {@code paddress}.
 *
 * @return number of bytes written (the encoded byte length, not {@code val.length()})
 * @throws OutOfMemoryError when the nginx pool cannot satisfy the allocation
 */
public static final int pushString(long paddress, String val, Charset encoding, long pool) {
    ByteBuffer bb = pickByteBuffer();
    bb = HackUtils.encode(val, encoding, bb);
    int len = bb.remaining();
    long strAddr = ngx_palloc(pool, len);
    // fix: guard against ngx_palloc returning NULL -- copying to address 0 would
    // crash the worker; mirrors the check in unsafeSetNginxVariable
    if (strAddr == 0) {
        throw new OutOfMemoryError("nginx OutOfMemoryError");
    }
    UNSAFE.putAddress(paddress, strAddr);
    ngx_http_clojure_mem_copy_to_addr(bb.array(), BYTE_ARRAY_OFFSET, strAddr, len);
    return len;
}
/**
 * Reads an nginx variable for request {@code r}.  When called off the nginx main
 * thread the read is marshalled onto it via a poll-task event and this thread blocks
 * until the result is available.
 *
 * @param r    native request address, must be non-zero
 * @param name variable name
 * @return the variable value, possibly {@code null}
 * @throws RuntimeException when {@code r} is 0 or the cross-thread fetch fails
 */
public static final String getNGXVariable(final long r, final String name) {
    if (r == 0) {
        throw new RuntimeException("invalid request which address is 0!");
    }
    if (Thread.currentThread() != NGINX_MAIN_THREAD) {
        FutureTask<String> task = new FutureTask<String>(new Callable<String>() {
            @Override
            public String call() throws Exception {
                return unsafeGetNGXVariable(r, name);
            }
        });
        postPollTaskEvent(task);
        try {
            return task.get();
        } catch (InterruptedException e) {
            // fix: restore the interrupt status so callers up the stack can observe it
            Thread.currentThread().interrupt();
            throw new RuntimeException("getNGXVariable " + name + " error", e);
        } catch (ExecutionException e) {
            throw new RuntimeException("getNGXVariable " + name + " error", e.getCause());
        }
    } else {
        return unsafeGetNGXVariable(r, name);
    }
}
/**
 * Reads an nginx variable directly; must only be called on the nginx main thread.
 * Core (predefined) variables use the fast known-name fetcher, everything else the
 * generic unknown-name fetcher.
 */
public static final String unsafeGetNGXVariable(long r, String name) {
    if (!CORE_VARS.containsKey(name)) {
        return (String) new RequestUnknownNameVarFetcher(name).fetch(r, DEFAULT_ENCODING);
    }
    return (String) new RequestKnownNameVarFetcher(name).fetch(r, DEFAULT_ENCODING);
}
/**
 * Sets an nginx variable for request {@code r}.  When called off the nginx main
 * thread the write is marshalled onto it via a poll-task event and this thread
 * blocks until it completes.
 *
 * @param r    native request address, must be non-zero
 * @param name variable name
 * @param val  new value
 * @return the native return code of the set operation
 * @throws RuntimeException when {@code r} is 0 or the cross-thread update fails
 */
public static final int setNGXVariable(final long r, final String name, final String val) {
    if (r == 0) {
        throw new RuntimeException("invalid request which address is 0!");
    }
    if (Thread.currentThread() != NGINX_MAIN_THREAD) {
        FutureTask<Integer> task = new FutureTask<Integer>(new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                return unsafeSetNginxVariable(r, name, val);
            }
        });
        postPollTaskEvent(task);
        try {
            return task.get();
        } catch (InterruptedException e) {
            // fix: restore the interrupt status so callers up the stack can observe it
            Thread.currentThread().interrupt();
            throw new RuntimeException("setNGXVariable " + name + " error", e);
        } catch (ExecutionException e) {
            throw new RuntimeException("setNGXVariable " + name + " error", e.getCause());
        }
    } else {
        return unsafeSetNginxVariable(r, name, val);
    }
}
/**
 * Sets an nginx variable directly; must only be called on the nginx main thread.
 * For core variables the preallocated native name string from CORE_VARS is reused;
 * otherwise a lower-cased ngx_str_t name is allocated from the request pool.
 * NOTE(review): the ngx_palloc for the name string (np) is not checked for NULL,
 * unlike the value allocation below -- confirm whether that is acceptable here.
 *
 * @throws OutOfMemoryError when the nginx pool cannot allocate the value bytes
 */
public static int unsafeSetNginxVariable(long r, String name, String val) throws OutOfMemoryError {
long np = CORE_VARS.containsKey(name) ? CORE_VARS.get(name) : 0;
long pool = UNSAFE.getAddress(r + NGX_HTTP_CLOJURE_REQ_POOL_OFFSET);
if (pool == 0) {
throw new RuntimeException("pool is null, maybe request is finished by wrong coroutine configuration!");
}
if (np == 0) {
// unknown variable: build a lower-cased native name from the request pool
np = ngx_palloc(pool, NGX_HTTP_CLOJURE_STR_SIZE);
pushNGXLowcaseString(np, name, DEFAULT_ENCODING, pool);
}
ByteBuffer vbb = HackUtils.encode(val, DEFAULT_ENCODING, pickByteBuffer());
int vlen = vbb.remaining();
long strAddr = ngx_palloc(pool, vbb.remaining());
if (strAddr == 0) {
throw new OutOfMemoryError("nginx OutOfMemoryError");
}
ngx_http_clojure_mem_copy_to_addr(vbb.array(), BYTE_ARRAY_OFFSET, strAddr, vlen);
return (int)ngx_http_clojure_mem_set_variable(r, np, strAddr, vlen);
}
/**
 * Asks nginx to discard the request body of {@code r}.  When called off the nginx
 * main thread the call is marshalled onto it via a poll-task event and this thread
 * blocks until it completes.
 *
 * @param r native request address, must be non-zero
 * @return the native return code of ngx_http_discard_request_body
 * @throws RuntimeException when {@code r} is 0 or the cross-thread call fails
 */
public static long discardRequestBody(final long r) {
    if (r == 0) {
        throw new RuntimeException("invalid request which address is 0!");
    }
    if (Thread.currentThread() != NGINX_MAIN_THREAD) {
        FutureTask<Long> task = new FutureTask<Long>(new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                return ngx_http_discard_request_body(r);
            }
        });
        postPollTaskEvent(task);
        try {
            return task.get();
        } catch (InterruptedException e) {
            // fix: restore the interrupt status so callers up the stack can observe it
            Thread.currentThread().interrupt();
            throw new RuntimeException("discardRequestBody error", e);
        } catch (ExecutionException e) {
            throw new RuntimeException("discardRequestBody error", e.getCause());
        }
    } else {
        return ngx_http_discard_request_body(r);
    }
}
/**
 * Executes the handler registered under {@code codeId} against native request
 * {@code r} with extra context {@code c}.
 */
public static int eval(final int codeId, final long r, final long c) {
    NginxHandler handler = HANDLERS.get(codeId);
    return handler.execute(r, c);
}
/**
 * Returns the shared logger, lazily creating a TinyLogService fallback so code paths
 * exercised from unit tests (outside nginx) still have a logger.
 * NOTE(review): the lazy init is not synchronized; a race can create two logger
 * instances -- presumably harmless, confirm.
 */
public static LoggerService getLog() {
//be friendly to junit test
if (log == null) {
//standard error stream is redirect to the nginx error log file, so we just use System.err as output stream.
log = TinyLogService.createDefaultTinyLogService();
}
return log;
}
/**
 * Replaces the shared logger instance used by the whole runtime.
 */
public static void setLog(LoggerService log) {
NginxClojureRT.log = log;
}
/**
 * Packs an event word whose high byte is {@code type} and whose low 7 bytes are the
 * native address of {@code o}, registering {@code o} so the main-thread event handler
 * can look it up again by that address.
 */
public final static long makeEventAndSaveIt(long type, Object o) {
    long addr = ngx_http_clojure_mem_get_obj_addr(o);
    long packed = (type << 56) | addr;
    savePostEventData(addr, o);
    return packed;
}
/**
 * Posts an event asking the nginx main thread to close the given socket.
 * Safe to call from any thread; the close itself runs on the main thread
 * (see the POST_EVENT_TYPE_CLOSE_SOCKET branch of handlePostEvent).
 */
public static void postCloseSocketEvent(NginxClojureSocketImpl s) {
ngx_http_clojure_mem_post_event(makeEventAndSaveIt(POST_EVENT_TYPE_CLOSE_SOCKET, s), null, 0);
}
/**
 * Takes a {@link HijackEvent} from the shared pool, allocating a fresh one when the
 * pool is empty.
 */
public static HijackEvent pickHijackEvent() {
    HijackEvent pooled = pooledEvents.poll();
    return pooled != null ? pooled : new HijackEvent();
}
/**
 * Recycles a hijack event into the shared pool after resetting its state.
 * Callers must not touch the event (nor return it again) after this call.
 */
public static void returnHijackEvent(HijackEvent e) {
e.recycle();
pooledEvents.add(e);
}
/**
 * Posts a fire-and-forget send of {@code message} (a byte[] or ByteBuffer) on a
 * hijacked channel.  The pooled event is recycled by the main-thread handler once
 * the send has been performed.
 */
public static void postHijackSendEvent(NginxHttpServerChannel channel, Object message, long off, int len, int flag) {
HijackEvent hijackEvent = pickHijackEvent().reset(channel, message, off, len , flag);
ngx_http_clojure_mem_post_event(
makeEventAndSaveIt(POST_EVENT_TYPE_HIJACK_SEND, hijackEvent), null, 0);
}
/**
 * Posts a write of {@code message} (byte[] or ByteBuffer) on a hijacked channel to
 * the nginx main thread and blocks until the write has been performed there.
 *
 * @return the number of bytes written (reported back through the event's offset)
 * @throws IOException when this thread is interrupted while waiting
 */
public static long postHijackWriteEvent(NginxHttpServerChannel channel, Object message, long off, int len) throws IOException {
    HijackEvent hijackEvent = pickHijackEvent().reset(channel, message, off, len, 0);
    ngx_http_clojure_mem_post_event(
            makeEventAndSaveIt(POST_EVENT_TYPE_HIJACK_WRITE, hijackEvent), null, 0);
    try {
        hijackEvent.awaitForFinish();
        long rc = hijackEvent.offset;
        returnHijackEvent(hijackEvent);
        return rc;
    } catch (InterruptedException e) {
        // fix: preserve the interrupt status for callers before translating the exception
        Thread.currentThread().interrupt();
        // NOTE(review): the event is deliberately NOT returned to the pool here -- the
        // main thread may still be completing it; recycling now could alias a live event
        throw new IOException("write await be interrupted", e);
    }
}
/**
 * Posts a fire-and-forget "send the response header" request for a hijacked channel;
 * the header content itself is determined on the main thread from the channel state.
 */
public static void postHijackSendHeaderEvent(NginxHttpServerChannel channel, int flag) {
HijackEvent hijackEvent = pickHijackEvent().reset(channel, null, 0, 0, flag);
ngx_http_clojure_mem_post_event(
makeEventAndSaveIt(POST_EVENT_TYPE_HIJACK_SEND_HEADER, hijackEvent), null, 0);
}
/**
 * Posts a fire-and-forget "send this raw header buffer" request for a hijacked
 * channel; {@code buf} is a byte[] or ByteBuffer delivered on the main thread.
 */
public static void postHijackSendHeaderEvent(NginxHttpServerChannel channel, Object buf, int pos, int len, int flag) {
HijackEvent hijackEvent = pickHijackEvent().reset(channel, buf, pos, len, flag);
ngx_http_clojure_mem_post_event(
makeEventAndSaveIt(POST_EVENT_TYPE_HIJACK_SEND_HEADER, hijackEvent), null, 0);
}
/**
 * Posts a fire-and-forget "send this whole response" request for a hijacked channel;
 * the prebuilt native chain address rides along in the event.
 */
public static void postHijackSendResponseEvent(NginxHttpServerChannel channel, NginxResponse resp, long chain) {
HijackEvent hijackEvent = pickHijackEvent().reset(channel, resp, chain);
ngx_http_clojure_mem_post_event(
makeEventAndSaveIt(POST_EVENT_TYPE_HIJACK_SEND_RESPONSE, hijackEvent), null, 0);
}
private final static byte[] POST_EVENT_BUF = new byte[4096];
/**
 * Dispatches one event posted to the nginx main thread.  The high byte of
 * {@code event} is the event tag; the low 7 bytes carry either the native address of
 * a previously saved Java object (see {@link #makeEventAndSaveIt(long, Object)}) or,
 * for complex broadcast events, the payload length inside {@code body}.
 *
 * @param event packed tag + data; 0 is a bare wake-up and is ignored
 * @param body  payload bytes for complex broadcast events (unused otherwise)
 * @param off   offset of the payload inside {@code body}
 * @return NGX_OK on success, NGX_HTTP_INTERNAL_SERVER_ERROR on failure
 */
public static int handlePostEvent(long event, byte[] body, long off) {
    if (event == 0) { //event loop wake up event
        return NGX_OK;
    }
    int tag = (int) ((0xff00000000000000L & event) >>> 56);
    long data = event & 0x00ffffffffffffffL;
    if (tag <= POST_EVENT_TYPE_SYSTEM_EVENT_IDX_END) {
        switch (tag) {
        case POST_EVENT_TYPE_HANDLE_RESPONSE:
            return handlePostedResponse(data);
        case POST_EVENT_TYPE_CLOSE_SOCKET:
            try {
                NginxClojureSocketImpl s = (NginxClojureSocketImpl) POSTED_EVENTS_DATA.remove(data);
                s.closeByPostEvent();
                return NGX_OK;
            } catch (Throwable e) {
                log.error("handle post close event error", e);
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }
        case POST_EVENT_TYPE_HIJACK_SEND : {
            HijackEvent hijackEvent = (HijackEvent) POSTED_EVENTS_DATA.remove(data);
            try {
                if (hijackEvent.channel.request.isReleased()) {
                    if (hijackEvent.message == null) {
                        return NGX_OK;
                    }
                    log.error("#%d: NginxHttpServerChannel released, request=%s", hijackEvent.channel.request.nativeRequest(), hijackEvent.channel.request);
                    return NGX_HTTP_INTERNAL_SERVER_ERROR;
                }
                if (hijackEvent.message instanceof ByteBuffer) {
                    hijackEvent.channel.send((ByteBuffer) hijackEvent.message, hijackEvent.flag);
                } else {
                    hijackEvent.channel.send((byte[]) hijackEvent.message, hijackEvent.offset, hijackEvent.len, hijackEvent.flag);
                }
            } finally {
                returnHijackEvent(hijackEvent);
            }
            return NGX_OK;
        }
        case POST_EVENT_TYPE_HIJACK_SEND_HEADER : {
            HijackEvent hijackHeaderEvent = (HijackEvent) POSTED_EVENTS_DATA.remove(data);
            try {
                if (hijackHeaderEvent.channel.request.isReleased()) {
                    log.error("#%d: send header on released NginxHttpServerChannel , request=%s", hijackHeaderEvent.channel.request.nativeRequest(), hijackHeaderEvent.channel.request);
                    // fix: removed the explicit returnHijackEvent here -- the finally block
                    // below already recycles the event, and returning it twice inserted the
                    // same instance into the pool twice, letting two users share one event
                    return NGX_HTTP_INTERNAL_SERVER_ERROR;
                }
                if (hijackHeaderEvent.message != null) {
                    if (hijackHeaderEvent.message instanceof ByteBuffer) {
                        hijackHeaderEvent.channel.sendHeader((ByteBuffer) hijackHeaderEvent.message, hijackHeaderEvent.flag);
                    } else {
                        hijackHeaderEvent.channel.sendHeader((byte[]) hijackHeaderEvent.message, hijackHeaderEvent.offset, hijackHeaderEvent.len, hijackHeaderEvent.flag);
                    }
                } else {
                    hijackHeaderEvent.channel.sendHeader(hijackHeaderEvent.flag);
                }
            } finally {
                returnHijackEvent(hijackHeaderEvent);
            }
            return NGX_OK;
        }
        case POST_EVENT_TYPE_HIJACK_SEND_RESPONSE : {
            HijackEvent hijackResponseEvent = (HijackEvent) POSTED_EVENTS_DATA.remove(data);
            try {
                if (hijackResponseEvent.channel.request.isReleased()) {
                    log.error("#%d: send response on released NginxHttpServerChannel, request=%s", hijackResponseEvent.channel.request.nativeRequest(), hijackResponseEvent.channel.request);
                    // fix: removed the explicit returnHijackEvent here (same double-recycle
                    // bug as in the send-header case); the finally block handles it
                    return NGX_HTTP_INTERNAL_SERVER_ERROR;
                }
                NginxRequest request = hijackResponseEvent.channel.request;
                request.channel().sendResponseHelp((NginxResponse) hijackResponseEvent.message, hijackResponseEvent.offset);
            } finally {
                returnHijackEvent(hijackResponseEvent);
            }
            return NGX_OK;
        }
        case POST_EVENT_TYPE_HIJACK_WRITE : {
            HijackEvent hijackEvent = (HijackEvent) POSTED_EVENTS_DATA.remove(data);
            try {
                if (hijackEvent.channel.request.isReleased()) {
                    log.error("#%d: send response on released NginxHttpServerChannel, request=%s", hijackEvent.channel.request.nativeRequest(), hijackEvent.channel.request);
                    // NOTE(review): the event is not completed on this path, so the waiter in
                    // postHijackWriteEvent may block forever -- confirm intended behavior
                    return NGX_HTTP_INTERNAL_SERVER_ERROR;
                }
                if (hijackEvent.message instanceof ByteBuffer) {
                    hijackEvent.complete(hijackEvent.channel.unsafeWrite((ByteBuffer) hijackEvent.message));
                } else {
                    hijackEvent.complete(hijackEvent.channel.unsafeWrite((byte[]) hijackEvent.message, hijackEvent.offset,
                            hijackEvent.len));
                }
            } finally {
                /*it will be released in the method postHijackWriteEvent */
            }
            return NGX_OK;
        }
        case POST_EVENT_TYPE_POLL_TASK : {
            Runnable task = (Runnable) POSTED_EVENTS_DATA.remove(data);
            try {
                task.run();
                return NGX_OK;
            } catch (Throwable e) {
                log.error("handle post poll task event error", e);
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }
        }
        case POST_EVENT_TYPE_PUB : {
            appEventListenerManager.onBroadcastedEvent(tag, data);
            return NGX_OK;
        }
        default:
            log.error("handlePostEvent:unknown event tag :%d", tag);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    } else {
        // application-defined broadcast events: simple ones carry only `data`,
        // complex ones carry a payload in `body` whose length is `data`
        if (tag < POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START) {
            appEventListenerManager.onBroadcastedEvent(tag, data);
            return NGX_OK;
        } else {
            appEventListenerManager.onBroadcastedEvent(tag, body, (int) off, (int) data);
            return NGX_OK;
        }
    }
}
/**
 * Called by native code when a posted event arrives.  Simple events are forwarded
 * directly; complex events first read their payload ({@code data} low bytes hold the
 * payload length) from the raw pipe into the shared POST_EVENT_BUF.
 * NOTE(review): POST_EVENT_BUF is a single shared static buffer, so this method is
 * presumably only ever invoked from the nginx main thread -- confirm.
 *
 * @param event packed tag + data word (high byte = tag)
 * @param pipe  native pipe handle to read complex payloads from
 * @return NGX_OK on success, NGX_HTTP_INTERNAL_SERVER_ERROR on failure
 */
private static int handlePostEvent(long event, long pipe) {
int tag = (int)((0xff00000000000000L & event) >>> 56);
long data = event & 0x00ffffffffffffffL;
if (log.isDebugEnabled()) {
log.debug("handlePostEvent tag=%d, len/data=%d", tag, data);
}
if (tag < POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START) {
return handlePostEvent(event, null, 0);
} else {
// complex event: pull `data` payload bytes out of the pipe first
long rc = ngx_http_clojure_mem_read_raw_pipe(pipe, POST_EVENT_BUF,
BYTE_ARRAY_OFFSET, data);
if (rc != data) {
log.error("ngx_http_clojure_mem_read_raw_pipe error, return %d, expect %d", rc, data);
return NGX_HTTP_INTERNAL_SERVER_ERROR;
}
return handlePostEvent(event, POST_EVENT_BUF, 0);
}
}
/**
 * Bridges a native channel event to the Java {@code ChannelListener} callbacks.
 * The four basic event types map directly onto listener methods; any other type is
 * treated as a raw (websocket-style) message whose kind is encoded in bit flags of
 * {@code type} (text / binary / close, plus "remaining" and "first" fragment markers)
 * and is delivered only when the listener is a {@code RawMessageListener}.
 * Listener exceptions are caught and logged so they never propagate into native code.
 */
private static void handleChannelEvent(int type, long status, Object data, ChannelListener<Object> listener) {
try {
switch(type) {
case NGX_HTTP_CLOJURE_CHANNEL_EVENT_CLOSE:
listener.onClose(data);
break;
case NGX_HTTP_CLOJURE_CHANNEL_EVENT_CONNECT :
listener.onConnect(status, data);
break;
case NGX_HTTP_CLOJURE_CHANNEL_EVENT_READ:
listener.onRead(status, data);
break;
case NGX_HTTP_CLOJURE_CHANNEL_EVENT_WRITE:
listener.onWrite(status, data);
break;
default:
// flag-encoded raw message events; ignored unless the listener handles raw messages
if (listener instanceof RawMessageListener) {
RawMessageListener<Object> rawListener = (RawMessageListener<Object>) listener;
if ( (type & NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGTEXT) != 0) {
rawListener.onTextMessage(data, status, (type & NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGREMAIN) != 0, (type & NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGFIRST) != 0);
}else if ( (type & NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGBIN) != 0) {
rawListener.onBinaryMessage(data, status, (type & NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGREMAIN) != 0, (type & NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGFIRST) != 0);
}else if ( (type & NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGCLOSE) != 0) {
rawListener.onClose(data, status);//(data, status, (type & NGX_HTTP_CLOJURE_CHANNEL_EVENT_MSGREMAIN) != 0);
}
}
}
}catch(Throwable e) {
log.error("handleChannelEvent error", e);
}
}
/**
 * Runs on the nginx main thread to deliver a response that a worker-pool thread
 * produced for request {@code r}.  Handles three shapes of result:
 * (1) TYPE_FAKE_PHASE_DONE -- the handler only signals phase completion, so either
 *     continue the filter chain or the current phase; (2) a body-filter result --
 *     build an output chain and hand it to the next filter; (3) a direct response --
 *     send headers + body and reconcile the return code with the current phase.
 *
 * @param r native request address (also the key into POSTED_EVENTS_DATA)
 * @return NGX_OK, or NGX_HTTP_INTERNAL_SERVER_ERROR when the request was already released
 */
public static int handlePostedResponse(long r) {
WorkerResponseContext ctx = (WorkerResponseContext) POSTED_EVENTS_DATA.remove(r);
NginxResponse resp = ctx.response;
NginxRequest req = ctx.request;
long rc = NGX_OK;
// the request may have been finalized while the worker thread was still running
if (ctx.request.isReleased()) {
if (resp.type() > 0) {
log.error("#%d: request is release! and we alos meet an unhandled exception! %s", req.nativeRequest(), resp.fetchBody());
}else {
log.error("#%d: request is release! ", req.nativeRequest());
}
return NGX_HTTP_INTERNAL_SERVER_ERROR;
}
ctx.request.applyDelayed();
if (resp.type() == NginxResponse.TYPE_FAKE_PHASE_DONE) {
// handler produced no direct body: resume the interrupted phase/filter chain
if (ctx.request.phase() == NGX_HTTP_HEADER_FILTER_PHASE) {
rc = ngx_http_filter_continue_next(r, NGX_HTTP_HEADER_FILTER_IN_THREADPOOL, 0);
ngx_http_finalize_request(r, rc);
return NGX_OK;
} else if (ctx.request.phase() == NGX_HTTP_BODY_FILTER_PHASE) {
ctx.chain = req.handler().buildOutputChain(resp);
NginxFilterRequest fr = (NginxFilterRequest)req;
rc = ngx_http_filter_continue_next(r, ctx.chain, fr.isLast() ? 0 : fr.chunkChain());
if (resp.isLast()) {
ngx_http_finalize_request(r, rc);
}
return NGX_OK;
}
ngx_http_clojure_mem_continue_current_phase(r, NGX_DECLINED);
return NGX_OK;
} else if (ctx.request.phase() == NGX_HTTP_BODY_FILTER_PHASE) {
// body filter with a real (possibly partial) body: push it down the filter chain
ctx.chain = req.handler().buildOutputChain(resp);
NginxFilterRequest fr = (NginxFilterRequest)req;
rc = ngx_http_filter_continue_next(r, ctx.chain, fr.isLast() ? 0 : fr.chunkChain());
if (resp.isLast()) {
ngx_http_finalize_request(r, rc);
} else {
ngx_http_clojure_mem_inc_req_count(r, -1);
}
return NGX_OK;
}
// the handler returns direct body and doesn't want to continue next phase.
long chain = ctx.chain;
int phase = req.phase();
long nr = req.nativeRequest();
if (chain < 0) {
// negative chain encodes a bare status code (no body)
req.handler().prepareHeaders(req, -(int)chain, resp.fetchHeaders());
rc = -chain;
}else if (chain == 0) {
rc = NGX_HTTP_INTERNAL_SERVER_ERROR;
} else {
int status = ctx.response.fetchStatus(NGX_HTTP_OK);
if (phase == NGX_HTTP_HEADER_FILTER_PHASE || phase == NGX_HTTP_BODY_FILTER_PHASE) {
ngx_http_clear_header_and_reset_ctx_phase(nr, ~phase);
}
req.handler().prepareHeaders(req, status, resp.fetchHeaders());
rc = ngx_http_send_header(nr);
if (rc == NGX_ERROR || rc > NGX_OK) {
}else {
rc = ngx_http_output_filter(r, chain);
if (rc == NGX_OK && phase != -1) {
ngx_http_ignore_next_response(nr);
}
if (phase != -1) {
// rewrite/access phases consume the adjusted code; other phases only finalize
if (phase == NGX_HTTP_ACCESS_PHASE || phase == NGX_HTTP_REWRITE_PHASE ) {
rc = handleReturnCodeFromHandler(nr, phase, rc, status);
}else {
handleReturnCodeFromHandler(nr, phase, rc, status);
}
}
}
}
if (phase == -1 || phase == NGX_HTTP_HEADER_FILTER_PHASE) {
ngx_http_finalize_request(r, rc);
}else if (rc != NGX_DONE) {
ngx_http_clojure_mem_continue_current_phase(r, rc);
}
return NGX_OK;
}
	/**
	 * Maps the return code produced while sending a handler's response to the value
	 * nginx expects for the given phase, finalizing the request when this side owns
	 * its lifecycle.
	 *
	 * @param r      native request pointer (ngx_http_request_t *)
	 * @param phase  the nginx phase the handler ran in, or -1 for a plain content handler
	 * @param rc     the raw return code from sending headers/body
	 * @param status the HTTP status that was sent (not used in the mapping itself)
	 * @return the code the caller should hand back to nginx
	 */
	protected static long handleReturnCodeFromHandler(long r, int phase, long rc, int status) {
		// content handlers (phase == -1) and hard errors pass through unchanged
		if (phase == -1 || rc == NGX_ERROR ) {
			return rc;
		}
		if (phase == NGX_HTTP_HEADER_FILTER_PHASE) { // header filter wants to hijack the whole response, e.g. some exception happened
			return NGX_ERROR;
		}
		ngx_http_finalize_request(r, rc);
		// rewrite/access phases return NGX_DONE — presumably so nginx treats the
		// request as taken over and stops running further phase handlers
		if (phase == NGX_HTTP_ACCESS_PHASE || phase == NGX_HTTP_REWRITE_PHASE ) {
			return NGX_DONE;
		}
		return rc;
	}
	/**
	 * Applies a user load balancer handler's result to the native peer-selection context.
	 *
	 * <p>A String body is treated as the chosen peer url (its length and bytes are pushed
	 * into native memory); an Integer body is treated as the chosen peer index.
	 *
	 * @param req  the request being balanced
	 * @param c    native address of the balancer context; appears to hold two slots:
	 *             (index-or-url-length, url-address) — layout defined on the C side
	 * @param resp response produced by the user load balancer handler, may be null
	 * @return NGX_OK on success, NGX_HTTP_NOT_FOUND when there is no usable result,
	 *         a non-OK status from the handler, or NGX_ERROR on a bad body type
	 */
	public static int handleLoadBalancerResponse(NginxRequest req, long c, NginxResponse resp) {
		if (resp == null) {
			return NGX_HTTP_NOT_FOUND;
		}
		int status = resp.fetchStatus(NGX_ERROR);
		if (status != NGX_HTTP_OK) {
			return status;
		}
		Object body = resp.fetchBody();
		if (body == null) {
			return NGX_HTTP_NOT_FOUND;
		}
		// read the two native slots of the context and the request's memory pool
		long idxOrLenAddr = UNSAFE.getAddress(c);
		long urlAddr = UNSAFE.getAddress(c + NGX_HTTP_CLOJURE_UINT_SIZE);
		long pool = UNSAFE.getAddress(req.nativeRequest() + NGX_HTTP_CLOJURE_REQ_POOL_OFFSET);
		if (body instanceof String) {
			// the chosen peer url, copied into the request pool
			String url = (String)body;
			pushNGXInt(idxOrLenAddr, url.length());
			pushString(urlAddr, url, DEFAULT_ENCODING, pool);
		} else if (body instanceof Integer) {
			// the index of the chosen peer
			pushNGXInt(idxOrLenAddr, (Integer)body);
		} else {
			log.error("bad load balancer result type :" + body.getClass() + ", should be integer or string");
			return NGX_ERROR;
		}
		return NGX_OK;
	}
	/**
	 * Sends a handler's response through the native nginx machinery (headers then body).
	 * Must be called on the nginx main thread.
	 *
	 * @param r    the request the response belongs to
	 * @param resp the response, may be null (treated as 404)
	 * @return the nginx return code for the current phase
	 * @throws RuntimeException when invoked from any thread other than the nginx main thread
	 */
	public static int handleResponse(NginxRequest r, final NginxResponse resp) {
		if (Thread.currentThread() != NGINX_MAIN_THREAD) {
			throw new RuntimeException("handleResponse can not be called out of nginx clojure main thread!");
		}
		if (resp == null) {
			return NGX_HTTP_NOT_FOUND;
		}
		int phase = r.phase();
		// TYPE_FAKE_PHASE_DONE means the handler produced no real response and nginx
		// should just continue with its own phase processing
		if (resp.type() == NginxResponse.TYPE_FAKE_PHASE_DONE) {
			if (phase == NGX_HTTP_REWRITE_PHASE || phase == NGX_HTTP_ACCESS_PHASE) {
				return NGX_DECLINED;
			}
			//header filter
			return (int)ngx_http_filter_continue_next(r.nativeRequest(), NGX_HTTP_HEADER_FILTER, 0);
		}
		NginxHandler handler = r.handler();
		int status = resp.fetchStatus(NGX_HTTP_OK);
		long chain = handler.buildOutputChain(resp);
		if (chain < 0) {
			// a negative chain encodes an error status code
			status = -(int)chain;
			handler.prepareHeaders(r, status, resp.fetchHeaders());
			return status;
		}
		long nr = r.nativeRequest();
		// filters replace the original response, so clear already-prepared headers and
		// flip the ctx phase back (~phase) on the native side first
		if (phase == NGX_HTTP_HEADER_FILTER_PHASE) {
			ngx_http_clear_header_and_reset_ctx_phase(nr, ~phase);
		}else if (phase == NGX_HTTP_BODY_FILTER_PHASE) {
			NginxFilterRequest fr = (NginxFilterRequest)r;
			ngx_http_clear_header_and_reset_ctx_phase(nr, ~phase, false);
			return (int)ngx_http_filter_continue_next(r.nativeRequest(), chain, fr.isLast() ? 0 : fr.chunkChain());
		}
		handler.prepareHeaders(r, status, resp.fetchHeaders());
		long rc = ngx_http_send_header(r.nativeRequest());
		if (rc == NGX_ERROR || rc > NGX_OK) {
			return (int) rc;
		}
		rc = ngx_http_output_filter(r.nativeRequest(), chain);
		// a phase handler that sent a full response must suppress the phase's normal response
		if (rc == NGX_OK && phase != -1) {
			ngx_http_ignore_next_response(nr);
		}
		return (int)handleReturnCodeFromHandler(nr, phase, rc, status);
	}
public static void completeAsyncResponse(NginxRequest req, final NginxResponse resp) {
if (req == null) {
return;
}
long r = req.nativeRequest();
if (r == 0) {
return;
}
if (req.isReleased()) {
if (resp.type() > 0) {
log.error("#%d: request is release! and we alos meet an unhandled exception! %s", req.nativeRequest(), resp.fetchBody());
}else {
log.error("#%d: request is release! ", req.nativeRequest());
}
return;
}
req.applyDelayed();
long rc;
int phase = req.phase();
if (resp.type() == NginxResponse.TYPE_FAKE_PHASE_DONE) {
if (phase == NGX_HTTP_HEADER_FILTER_PHASE) {
rc = ngx_http_filter_continue_next(r, NGX_HTTP_HEADER_FILTER, 0);
ngx_http_finalize_request(r, rc);
return;
}
ngx_http_clojure_mem_continue_current_phase(r, NGX_DECLINED);
return;
}
rc = handleResponse(req, resp);
if (phase == -1 || phase == NGX_HTTP_HEADER_FILTER_PHASE) {
ngx_http_finalize_request(r, rc);
}else if (rc != MiniConstants.NGX_DONE) {
ngx_http_clojure_mem_continue_current_phase(r, rc);
}
}
public static void completeAsyncResponse(NginxRequest r, int rc) {
if (r == null) {
return;
}
completeAsyncResponse(r.nativeRequest(), rc);
}
public static void completeAsyncResponse(long r, int rc) {
if (r == 0) {
return;
}
ngx_http_finalize_request(r, rc);
}
	/**
	 * When called in the main thread the response is handled directly; otherwise an event
	 * is posted through the pipe so the main thread gets a chance to handle this response.
	 */
	public static void postResponseEvent(NginxRequest req, NginxResponse resp) {
		if (Thread.currentThread() == NGINX_MAIN_THREAD) {
			int phase = req.phase();
			int rc = handleResponse(req, resp);
			// content handlers (-1) and header filters own the request lifecycle here
			if (phase == -1 || phase == NGX_HTTP_HEADER_FILTER_PHASE) {
				ngx_http_finalize_request(req.nativeRequest(), rc);
			} else if (rc != MiniConstants.NGX_DONE) {
				ngx_http_clojure_mem_continue_current_phase(req.nativeRequest(), rc);
			}
		} else {
			// stash the response context for the main thread, then wake it via the event pipe
			long r = req.nativeRequest();
			WorkerResponseContext ctx = new WorkerResponseContext(resp, req);
			savePostEventData(r, ctx);
			ngx_http_clojure_mem_post_event(r, null, 0);
		}
	}
	/**
	 * Posts a poll-task event through the event pipe so that {@code task} will be
	 * picked up and run on the nginx main thread.
	 */
	public static void postPollTaskEvent(Runnable task) {
		ngx_http_clojure_mem_post_event(makeEventAndSaveIt(POST_EVENT_TYPE_POLL_TASK,task), null, 0);
	}
/**
* broadcast simple event to all nginx workers
* @param tag must be less than POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START
* @param id event id, must be less than 0x0100000000000000L
* @param
*/
public static int broadcastEvent(long tag, long id) {
if (tag >= POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START) {
throw new IllegalArgumentException("invalid event tag :" + tag);
}
if (id >= 0x0100000000000000L) {
throw new IllegalArgumentException("invalid event id :" + id + ", must be less than 0x0100000000000000L");
}
id |= (tag << 56);
if (Thread.currentThread() == NGINX_MAIN_THREAD) {
int rt = (int) ngx_http_clojure_mem_broadcast_event(id, null, 0, 0);
if (rt == 0) {
return handlePostEvent(id, null, 0);
} else {
rt = handlePostEvent(id, null, 0);
}
return rt;
} else {
return (int) ngx_http_clojure_mem_broadcast_event(id, null, 0, 1);
}
}
/**
* broadcast event to all nginx workers, message length must be less than PIPE_BUF - 8, generally on Linux/Windows is 4088, on MacosX is 504
* message will be truncated if its length exceeds this limitation.
* @param tag must be greater than POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START and less than POST_EVENT_TYPE_COMPLEX_EVENT_IDX_END
* @param body
* @param offset
* @param len
*/
public static int broadcastEvent(long tag, byte[] body, long offset, long len) {
if (tag >= 0xff) {
throw new IllegalArgumentException("invalid event tag :" + tag);
}
if (tag < POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START) {
throw new IllegalArgumentException("invalid event tag :" + tag + ", must be greater than POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START");
}
long event = (tag << 56) | len;
if (log.isDebugEnabled()) {
log.debug("broadcast event tag=%d, body=%s", tag, new String(body, (int)offset, (int)len), DEFAULT_ENCODING);
}
if (Thread.currentThread() == NGINX_MAIN_THREAD) {
int rt = (int)ngx_http_clojure_mem_broadcast_event(event, body, BYTE_ARRAY_OFFSET + offset, 0);
if (rt == 0) {
rt = (int)handlePostEvent(event, body, offset);
} else {
handlePostEvent(event, body, offset);
}
return rt;
} else {
return (int)ngx_http_clojure_mem_broadcast_event(event, body, BYTE_ARRAY_OFFSET + offset, 1);
}
}
	/**
	 * broadcast event to all nginx workers, message length must be less than PIPE_BUF - 8, generally on Linux/Windows is 4088, on MacosX is 504
	 * message will be truncated if its length exceeds this limitation.
	 * it is identical to
	 * <pre>
	 * broadcastEvent(POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START, body, offset, len);
	 * </pre>
	 * @param message event payload bytes
	 * @param offset start offset within {@code message}
	 * @param len number of bytes to send
	 * @return 0 on success, otherwise a non-zero error code
	 */
	public static int broadcastEvent(byte[] message, long offset, long len) {
		return broadcastEvent(POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START, message, offset, len);
	}
	/**
	 * broadcast event to all nginx workers, message length, viz. message.getBytes("utf-8").length, must be less than PIPE_BUF - 8,
	 * generally on Linux/Windows is 4088, on MacosX is 504
	 * message will be truncated if its length exceeds this limitation.
	 * it is identical to
	 * <pre>
	 * byte[] buf = message.getBytes(DEFAULT_ENCODING);
	 * return broadcastEvent(tag, buf, 0, buf.length);
	 * </pre>
	 * @param tag must be greater than POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START and less than POST_EVENT_TYPE_COMPLEX_EVENT_IDX_END
	 * @param message text payload, encoded with DEFAULT_ENCODING before sending
	 * @return 0 on success, otherwise a non-zero error code
	 */
	public static int broadcastEvent(long tag, String message) {
		byte[] buf = message.getBytes(DEFAULT_ENCODING);
		return broadcastEvent(tag, buf, 0, buf.length);
	}
	/**
	 * broadcast event to all nginx workers, message length, viz. message.getBytes("utf-8").length, must be less than PIPE_BUF - 8,
	 * generally on Linux/Windows is 4088, on MacosX is 504
	 * message will be truncated if its length exceeds this limitation.
	 * it is identical to
	 * <pre>
	 * byte[] buf = message.getBytes(DEFAULT_ENCODING);
	 * return broadcastEvent(POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START, buf, 0, buf.length);
	 * </pre>
	 * @param message text payload, encoded with DEFAULT_ENCODING before sending
	 * @return 0 on success, otherwise a non-zero error code
	 */
	public static int broadcastEvent(String message) {
		byte[] buf = message.getBytes(DEFAULT_ENCODING);
		return broadcastEvent(POST_EVENT_TYPE_COMPLEX_EVENT_IDX_START, buf, 0, buf.length);
	}
	/**
	 * Runs one call of a {@code coBatchCall} batch inside its own coroutine (or a
	 * thread in the testing fallback), stores its result into the shared results
	 * array, and resumes the suspended parent coroutine once the whole batch is done.
	 */
	public static final class BatchCallRunner implements Runnable {
		// parent coroutine to resume when the batch completes; may be null (testing fallback)
		Coroutine parent;
		// single-element array shared by the whole batch: number of calls still running
		int[] counter;
		@SuppressWarnings("rawtypes")
		Callable handler;
		// index of this call's slot in results
		int order;
		// shared result array, one slot per call
		Object[] results;
		@SuppressWarnings("rawtypes")
		public BatchCallRunner(Coroutine parent, int[] counter, Callable handler,
				int order, Object[] results) {
			super();
			this.parent = parent;
			this.counter = counter;
			this.handler = handler;
			this.order = order;
			this.results = results;
		}
		// NOTE: the throws SuspendExecution clause is a marker used by the coroutine
		// bytecode instrumentation; it must not be removed
		@Override
		public void run() throws SuspendExecution {
			try {
				results[order] = handler.call();
			} catch(Throwable e) {
				// a failed call leaves its result slot null; the batch still completes
				log.error("error in sub coroutine", e);
			}
			// the last finisher wakes the suspended parent
			if ( --counter[0] == 0 && parent != null && parent.getState() == Coroutine.State.SUSPENDED) {
				parent.resume();
			}
		}
	}
	/**
	 * Invokes all the given callables concurrently (each in its own coroutine) and
	 * waits until every one has finished, returning their results in argument order.
	 * A result slot is null when the corresponding call threw (the error is logged).
	 * Outside a coroutine-enabled context it falls back to a thread pool intended
	 * for testing only.
	 */
	public static final Object[] coBatchCall(@SuppressWarnings("unchecked") Callable<Object> ...calls) {
		int c = calls.length;
		// shared countdown: BatchCallRunner decrements it and resumes us at zero
		int[] counter = new int[] {c};
		Object[] results = new Object[c];
		Coroutine parent = Coroutine.getActiveCoroutine();
		if (parent == null && (JavaAgent.db == null || !JavaAgent.db.isRunTool())) {
			// no active coroutine and no instrumentation tool running: thread fallback
			log.warn("we are not in coroutine enabled context, so we turn to use thread for only testing usage!");
			@SuppressWarnings("rawtypes")
			Future[] futures = new Future[c];
			for (int i = 0; i < c ; i++) {
				BatchCallRunner bcr = new BatchCallRunner(parent, counter, calls[i], i, results);
				if (threadPoolOnlyForTestingUsage == null) {
					initThreadPoolOnlyForTestingUsage();
				}
				futures[i] = threadPoolOnlyForTestingUsage.submit(bcr);
			}
			// block until every submitted call has finished
			for (@SuppressWarnings("rawtypes") Future f : futures) {
				try {
					f.get();
				} catch (Throwable e) {
					log.error("do future failed", e);
				}
			}
		}else {
			boolean shouldYieldParent = false;
			// start every call in its own coroutine; a call that suspends (e.g. on I/O)
			// leaves its coroutine unfinished and keeps the batch open
			for (int i = 0; i < c ; i++) {
				Coroutine co = new Coroutine(new BatchCallRunner(parent, counter, calls[i], i, results));
				co.resume();
				if (co.getState() != Coroutine.State.FINISHED) {
					shouldYieldParent = true;
				}
			}
			// suspend until the last BatchCallRunner resumes us
			if (parent != null && shouldYieldParent) {
				Coroutine.yield();
			}
		}
		return results;
	}
public static ByteBuffer pickByteBuffer() {
// if (defaultByteBuffer != null) {
// defaultByteBuffer.clear();
// return defaultByteBuffer;
// }
ByteBuffer bb = threadLocalByteBuffers.get();
if (bb == null) {
threadLocalByteBuffers.set(bb = ByteBuffer.allocate(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE));
} else {
bb.clear();
}
return bb;
}
public static CharBuffer pickCharBuffer() {
// if (defaultCharBuffer != null) {
// defaultCharBuffer.clear();
// return defaultCharBuffer;
// }
CharBuffer cb = threadLocalCharBuffers.get();
if (cb == null) {
threadLocalCharBuffers.set(cb = CharBuffer.allocate(NGINX_CLOJURE_CORE_CLIENT_HEADER_MAX_SIZE));
} else {
cb.clear();
}
return cb;
}
}
| nginx-clojure/nginx-clojure | src/java/nginx/clojure/NginxClojureRT.java |
// Given an array of integers, find if the array contains any duplicates. Your function should return
// true if any value appears at least twice in the array, and it should return false if every element is distinct.
class ContainsDuplicate {

    /**
     * Returns true if any value occurs more than once in {@code nums}.
     *
     * <p>Uses a HashSet instead of the previous HashMap: only membership matters,
     * and {@code Set.add} returning false detects the first repeated value directly.
     * Runs in O(n) time and O(n) extra space; an empty array yields false.
     *
     * @param nums the values to scan (may be empty)
     * @return true if a duplicate exists, false otherwise
     */
    public boolean containsDuplicate(int[] nums) {
        Set<Integer> seen = new HashSet<>();
        for (int n : nums) {
            if (!seen.add(n)) {
                return true; // add() returns false when n was already present
            }
        }
        return false;
    }
}
| kdn251/interviews | leetcode/hash-table/ContainsDuplicate.java |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.cli;
import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.core.SuppressForbidden;
import java.io.Closeable;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Arrays;
/**
* An action to execute within a cli.
*/
/**
 * An action to execute within a cli.
 */
public abstract class Command implements Closeable {

    /** A description of the command, used in the help output. */
    protected final String description;

    /** The option parser for this command. */
    protected final OptionParser parser = new OptionParser();

    // standard options available to every command; --verbose is rejected when --silent is given
    private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "Show help").forHelp();
    private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "Show minimal output");
    private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "Show verbose output")
        .availableUnless(silentOption);

    /**
     * Construct the command with the specified command description and runnable to execute before main is invoked.
     * @param description the command description
     *
     */
    public Command(final String description) {
        this.description = description;
    }

    /**
     * Parses options for this command from args and executes it.
     *
     * @return an {@link ExitCodes} value: OK on success, USAGE for argument errors,
     *         the exception's own code for {@link UserException}, IO_ERROR for I/O
     *         failures, and CODE_ERROR for any other unexpected error
     */
    public final int main(String[] args, Terminal terminal, ProcessInfo processInfo) throws IOException {
        try {
            mainWithoutErrorHandling(args, terminal, processInfo);
        } catch (OptionException e) {
            // print help to stderr on exceptions
            printHelp(terminal, true);
            terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
            return ExitCodes.USAGE;
        } catch (UserException e) {
            // only usage-level user errors warrant re-printing the help text
            if (e.exitCode == ExitCodes.USAGE) {
                printHelp(terminal, true);
            }
            printUserException(terminal, e);
            return e.exitCode;
        } catch (IOException ioe) {
            terminal.errorPrintln(ioe);
            return ExitCodes.IO_ERROR;
        } catch (Throwable t) {
            // It's acceptable to catch Throwable at this point:
            // We're about to exit and only want to print the stacktrace with appropriate formatting (e.g. JSON).
            terminal.errorPrintln(t);
            return ExitCodes.CODE_ERROR;
        }
        return ExitCodes.OK;
    }

    /**
     * Executes the command, but all errors are thrown.
     */
    protected void mainWithoutErrorHandling(String[] args, Terminal terminal, ProcessInfo processInfo) throws Exception {
        final OptionSet options = parseOptions(args);

        // -h/--help short-circuits execution entirely
        if (options.has(helpOption)) {
            printHelp(terminal, false);
            return;
        }

        // silent wins over verbose (the parser already rejects the combination)
        if (options.has(silentOption)) {
            terminal.setVerbosity(Terminal.Verbosity.SILENT);
        } else if (options.has(verboseOption)) {
            terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
        } else {
            terminal.setVerbosity(Terminal.Verbosity.NORMAL);
        }

        execute(terminal, options, processInfo);
    }

    /**
     * Parse command line arguments for this command.
     * @param args The string arguments passed to the command
     * @return A set of parsed options
     */
    public OptionSet parseOptions(String[] args) {
        return parser.parse(args);
    }

    /** Prints a help message for the command to the terminal (stdout, or stderr when reporting an error). */
    private void printHelp(Terminal terminal, boolean toStdError) throws IOException {
        StringWriter writer = new StringWriter();
        parser.printHelpOn(writer);
        if (toStdError) {
            terminal.errorPrintln(description);
            terminal.errorPrintln("");
            terminal.errorPrintln(writer.toString());
        } else {
            terminal.println(description);
            terminal.println("");
            // command-specific help only appears in the non-error output
            printAdditionalHelp(terminal);
            terminal.println(writer.toString());
        }
    }

    /** Prints additional help information, specific to the command */
    protected void printAdditionalHelp(Terminal terminal) {}

    /** Reports a {@link UserException} (and its exit code) on the terminal's error stream. */
    protected void printUserException(Terminal terminal, UserException e) {
        if (e.getMessage() != null) {
            terminal.errorPrintln("");
            terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage() + ", with exit code " + e.exitCode);
        }
    }

    @SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
    protected static void exit(int status) {
        System.exit(status);
    }

    /**
     * Executes this command.
     *
     * Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */
    protected abstract void execute(Terminal terminal, OptionSet options, ProcessInfo processInfo) throws Exception;

    @Override
    public void close() throws IOException {

    }
}
| elastic/elasticsearch | libs/cli/src/main/java/org/elasticsearch/cli/Command.java |
class Solution {

    /**
     * Returns true when {@code str}'s space-separated words follow {@code pattern}
     * with a full bijection: each pattern character maps to exactly one word and
     * each word is claimed by exactly one pattern character.
     *
     * @param pattern sequence of mapping keys, one char per word
     * @param str     space-separated words to check
     * @return true if the words match the pattern bijectively
     */
    public boolean wordPattern(String pattern, String str) {
        String[] words = str.split(" ");
        if (words.length != pattern.length()) {
            return false;
        }
        HashMap<Character, String> charToWord = new HashMap<>();
        HashSet<String> mappedWords = new HashSet<>();
        for (int i = 0; i < words.length; i++) {
            char c = pattern.charAt(i);
            String expected = charToWord.get(c);
            if (expected == null) {
                // first occurrence of this pattern char: the word must be unclaimed too
                if (!mappedWords.add(words[i])) {
                    return false;
                }
                charToWord.put(c, words[i]);
            } else if (!expected.equals(words[i])) {
                return false;
            }
        }
        return true;
    }
}
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.base;
import com.google.common.annotations.GwtCompatible;
import javax.annotation.CheckForNull;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* Legacy version of {@link java.util.function.Predicate java.util.function.Predicate}. Determines a
* true or false value for a given input.
*
* <p>As this interface extends {@code java.util.function.Predicate}, an instance of this type may
* be used as a {@code Predicate} directly. To use a {@code java.util.function.Predicate} where a
* {@code com.google.common.base.Predicate} is expected, use the method reference {@code
* predicate::test}.
*
* <p>This interface is now a legacy type. Use {@code java.util.function.Predicate} (or the
* appropriate primitive specialization such as {@code IntPredicate}) instead whenever possible.
* Otherwise, at least reduce <i>explicit</i> dependencies on this type by using lambda expressions
* or method references instead of classes, leaving your code easier to migrate in the future.
*
* <p>The {@link Predicates} class provides common predicates and related utilities.
*
* <p>See the Guava User Guide article on <a
* href="https://github.com/google/guava/wiki/FunctionalExplained">the use of {@code Predicate}</a>.
*
* @author Kevin Bourrillion
* @since 2.0
*/
@FunctionalInterface
@GwtCompatible
@ElementTypesAreNonnullByDefault
public interface Predicate<T extends @Nullable Object> extends java.util.function.Predicate<T> {
  /**
   * Returns the result of applying this predicate to {@code input} (Java 8+ users, see notes in the
   * class documentation above). This method is <i>generally expected</i>, but not absolutely
   * required, to have the following properties:
   *
   * <ul>
   *   <li>Its execution does not cause any observable side effects.
   *   <li>The computation is <i>consistent with equals</i>; that is, {@link Objects#equal
   *       Objects.equal}{@code (a, b)} implies that {@code predicate.apply(a) ==
   *       predicate.apply(b))}.
   * </ul>
   *
   * @throws NullPointerException if {@code input} is null and this predicate does not accept null
   *     arguments
   */
  boolean apply(@ParametricNullness T input);

  /**
   * Indicates whether another object is equal to this predicate.
   *
   * <p>Most implementations will have no reason to override the behavior of {@link Object#equals}.
   * However, an implementation may also choose to return {@code true} whenever {@code object} is a
   * {@link Predicate} that it considers <i>interchangeable</i> with this one. "Interchangeable"
   * <i>typically</i> means that {@code this.apply(t) == that.apply(t)} for all {@code t} of type
   * {@code T}). Note that a {@code false} result from this method does not imply that the
   * predicates are known <i>not</i> to be interchangeable.
   */
  @Override
  boolean equals(@CheckForNull Object object);

  /**
   * Delegates to {@link #apply}, so a Guava {@code Predicate} can be used directly wherever a
   * {@code java.util.function.Predicate} is expected.
   */
  @Override
  default boolean test(@ParametricNullness T input) {
    return apply(input);
  }
}
| google/guava | guava/src/com/google/common/base/Predicate.java |
/*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.mute;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Optional;
import lombok.extern.slf4j.Slf4j;
/**
* Mute pattern is utilized when we need to suppress an exception due to an API flaw or in situation
* when all we can do to handle the exception is to log it. This pattern should not be used
* everywhere. It is very important to logically handle the exceptions in a system, but some
* situations like the ones described above require this pattern, so that we don't need to repeat
* <pre>
* <code>
* try {
* // code that may throwing exception we need to ignore or may never be thrown
* } catch (Exception ex) {
* // ignore by logging or throw error if unexpected exception occurs
* }
* </code>
* </pre> every time we need to ignore an exception.
*/
@Slf4j
public class App {

  /**
   * Program entry point.
   *
   * @param args command line args.
   */
  public static void main(String[] args) {
    useOfLoggedMute();
    useOfMute();
  }

  /*
   * Typically used when the API declares some exception but cannot do so. Usually a
   * signature mistake. In this example out is not supposed to throw exception as it is a
   * ByteArrayOutputStream. So we utilize mute, which will throw AssertionError if unexpected
   * exception occurs.
   */
  private static void useOfMute() {
    var out = new ByteArrayOutputStream();
    Mute.mute(() -> out.write("Hello".getBytes()));
  }

  /*
   * Acquires a resource, uses it and guarantees it is closed afterwards. A failed close
   * can only be logged, hence the loggedMute in closeResource.
   */
  private static void useOfLoggedMute() {
    // A plain nullable local replaces the previous Optional wrapper: Optional is meant
    // for return values, not for tracking a local resource (Effective Java, Item 55).
    Resource resource = null;
    try {
      resource = acquireResource();
      utilizeResource(resource);
    } finally {
      if (resource != null) {
        closeResource(resource);
      }
    }
  }

  /*
   * All we can do while failed close of a resource is to log it.
   */
  private static void closeResource(Resource resource) {
    Mute.loggedMute(resource::close);
  }

  private static void utilizeResource(Resource resource) {
    LOGGER.info("Utilizing acquired resource: {}", resource);
  }

  /* Returns a resource whose close() always fails, to demonstrate loggedMute. */
  private static Resource acquireResource() {
    return new Resource() {
      @Override
      public void close() throws IOException {
        throw new IOException("Error in closing resource: " + this);
      }
    };
  }
}
| smedals/java-design-patterns | mute-idiom/src/main/java/com/iluwatar/mute/App.java |
/*
* This project is licensed under the MIT license. Module model-view-viewmodel is using ZK framework licensed under LGPL (see lgpl-3.0.txt).
*
* The MIT License
* Copyright © 2014-2022 Ilkka Seppälä
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.iluwatar.state;
import lombok.extern.slf4j.Slf4j;
/**
* Peaceful state.
*/
@Slf4j
public class PeacefulState implements State {

  // the mammoth whose behavior this state describes
  private final Mammoth mammoth;

  /**
   * Creates the peaceful state for the given mammoth.
   *
   * @param mammoth the state machine owner this state reports on
   */
  public PeacefulState(Mammoth mammoth) {
    this.mammoth = mammoth;
  }

  /** Describes the mammoth's current (calm) behavior. */
  @Override
  public void observe() {
    LOGGER.info("{} is calm and peaceful.", mammoth);
  }

  /** Logs the transition into this state. */
  @Override
  public void onEnterState() {
    LOGGER.info("{} calms down.", mammoth);
  }
}
| smedals/java-design-patterns | state/src/main/java/com/iluwatar/state/PeacefulState.java |
//Given a collection of distinct numbers, return all possible permutations.
//
//For example,
//[1,2,3] have the following permutations:
//[
//[1,2,3],
//[1,3,2],
//[2,1,3],
//[2,3,1],
//[3,1,2],
//[3,2,1]
//]
class Permutations {
public List<List<Integer>> permute(int[] nums) {
LinkedList<List<Integer>> result = new LinkedList<List<Integer>>();
result.add(new ArrayList<Integer>());
for (int n: nums) {
int size = result.size();
while(size > 0) {
List<Integer> current = result.pollFirst();
for (int i = 0; i <= current.size(); i++) {
List<Integer> temp = new ArrayList<Integer>(current);
temp.add(i, n);
result.add(temp);
}
size--;
}
}
return result;
}
}
| kdn251/interviews | company/linkedin/Permutations.java |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.index;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.core.CheckedConsumer;
import java.io.IOException;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;
/**
* Utilities to help with closing shards and indices
*/
/**
 * Utilities to help with closing shards and indices
 */
public class CloseUtils {
    private CloseUtils() {/* no instances */}

    /**
     * Sentinel result value to record success
     */
    private static final Exception SUCCEEDED = new Exception() {
        @Override
        public synchronized Throwable fillInStackTrace() {
            // the sentinel is shared and never thrown, so a stack trace is useless
            return this;
        }
    };

    /**
     * Execute a naturally-async action (e.g. to close a shard) but using the current thread so that it completes synchronously, re-throwing
     * any exception that might be passed to its listener.
     */
    public static void executeDirectly(CheckedConsumer<ActionListener<Void>, IOException> action) throws IOException {
        // it's possible to do this with a PlainActionFuture too but extracting the exact Exception is a bit of a pain because of
        // exception-mangling and/or interrupt handling - see #108125
        final var closeExceptionRef = new AtomicReference<Exception>();
        ActionListener.run(ActionListener.assertOnce(new ActionListener<>() {
            @Override
            public void onResponse(Void unused) {
                // record success with the sentinel so "null" can mean "listener never completed"
                closeExceptionRef.set(SUCCEEDED);
            }

            @Override
            public void onFailure(Exception e) {
                closeExceptionRef.set(e);
            }
        }), action);
        final var closeException = closeExceptionRef.get();
        if (closeException == SUCCEEDED) {
            return;
        }
        // re-throw without wrapping where the static type allows it
        if (closeException instanceof RuntimeException runtimeException) {
            throw runtimeException;
        }
        if (closeException instanceof IOException ioException) {
            throw ioException;
        }
        assert false : closeException;
        if (closeException != null) {
            throw new RuntimeException("unexpected exception on shard close", closeException);
        } // else listener not completed, definitely a bug, but throwing something won't help anyone here
    }

    /**
     * Utility shard-close executor for the cases where we close an {@link IndexService} without having created any shards, so we can assert
     * that it's never used.
     */
    public static final Executor NO_SHARDS_CREATED_EXECUTOR = r -> {
        assert false : r;
        r.run(); // just in case we're wrong, in production we need to actually run the task
    };
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/index/CloseUtils.java |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.grok;
import org.jcodings.specific.UTF8Encoding;
import org.joni.Matcher;
import org.joni.NameEntry;
import org.joni.Option;
import org.joni.Regex;
import org.joni.Region;
import org.joni.Syntax;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Function;
public final class Grok {

    // capture group names used when parsing the %{NAME:subname=definition} syntax below
    private static final String NAME_GROUP = "name";
    private static final String SUBNAME_GROUP = "subname";
    private static final String PATTERN_GROUP = "pattern";
    private static final String DEFINITION_GROUP = "definition";

    // Matches one grok reference, e.g. %{IP:client} or %{WORD:w=foo|bar}.
    // NOTE(review): [A-z] in the pattern group also matches the chars between 'Z' and 'a'
    // ("[", "\", "]", "^", "_", "`"); '_' is plausibly wanted in pattern names — confirm
    // whether the rest is intended before tightening it.
    private static final String GROK_PATTERN = "%\\{"
        + "(?<name>"
        + "(?<pattern>[A-z0-9]+)"
        + "(?::(?<subname>[[:alnum:]@\\[\\]_:.-]+))?"
        + ")"
        + "(?:=(?<definition>"
        + "(?:[^{}]+|\\.+)+"
        + ")"
        + ")?"
        + "\\}";
    private static final Regex GROK_PATTERN_REGEX = new Regex(
        GROK_PATTERN.getBytes(StandardCharsets.UTF_8),
        0,
        GROK_PATTERN.getBytes(StandardCharsets.UTF_8).length,
        Option.NONE,
        UTF8Encoding.INSTANCE,
        Syntax.DEFAULT
    );

    private static final int MAX_TO_REGEX_ITERATIONS = 100_000; // sanity limit

    // whether %{NAME:subname} references contribute named captures to the compiled regex
    private final boolean namedCaptures;
    // the fully-expanded grok expression, compiled with joni
    private final Regex compiledExpression;
    // guards against pathological regexes running unbounded; may be a no-op
    private final MatcherWatchdog matcherWatchdog;
    // metadata for each named capture group of compiledExpression
    private final List<GrokCaptureConfig> captureConfig;
    /** Creates a grok with named captures enabled and no watchdog. */
    public Grok(PatternBank patternBank, String grokPattern, Consumer<String> logCallBack) {
        this(patternBank, grokPattern, true, MatcherWatchdog.noop(), logCallBack);
    }

    /** Creates a grok with named captures enabled, guarded by the given watchdog. */
    public Grok(PatternBank patternBank, String grokPattern, MatcherWatchdog matcherWatchdog, Consumer<String> logCallBack) {
        this(patternBank, grokPattern, true, matcherWatchdog, logCallBack);
    }

    /** Package-private variant that allows disabling named captures. */
    Grok(PatternBank patternBank, String grokPattern, boolean namedCaptures, Consumer<String> logCallBack) {
        this(patternBank, grokPattern, namedCaptures, MatcherWatchdog.noop(), logCallBack);
    }

    /**
     * Expands the grok pattern against the pattern bank, compiles the resulting regex
     * with joni, and collects capture metadata for every named group.
     *
     * @param patternBank     bank resolving %{NAME} references
     * @param grokPattern     the grok expression to compile
     * @param namedCaptures   whether subnames become named captures
     * @param matcherWatchdog watchdog registered around joni match calls
     * @param logCallBack     receives warnings emitted by the joni compiler
     */
    private Grok(
        PatternBank patternBank,
        String grokPattern,
        boolean namedCaptures,
        MatcherWatchdog matcherWatchdog,
        Consumer<String> logCallBack
    ) {
        this.namedCaptures = namedCaptures;
        this.matcherWatchdog = matcherWatchdog;
        // expand all %{...} references into one flat regex, then compile it
        String expression = toRegex(patternBank, grokPattern);
        byte[] expressionBytes = expression.getBytes(StandardCharsets.UTF_8);
        this.compiledExpression = new Regex(
            expressionBytes,
            0,
            expressionBytes.length,
            Option.DEFAULT,
            UTF8Encoding.INSTANCE,
            logCallBack::accept
        );

        List<GrokCaptureConfig> grokCaptureConfigs = new ArrayList<>();
        for (Iterator<NameEntry> entry = compiledExpression.namedBackrefIterator(); entry.hasNext();) {
            grokCaptureConfigs.add(new GrokCaptureConfig(entry.next()));
        }
        this.captureConfig = List.copyOf(grokCaptureConfigs);
    }
private static String groupMatch(String name, Region region, String pattern) {
int number = GROK_PATTERN_REGEX.nameToBackrefNumber(
name.getBytes(StandardCharsets.UTF_8),
0,
name.getBytes(StandardCharsets.UTF_8).length,
region
);
int begin = region.beg[number];
int end = region.end[number];
if (begin < 0) { // no match found
return null;
}
return new String(pattern.getBytes(StandardCharsets.UTF_8), begin, end - begin, StandardCharsets.UTF_8);
}
/**
* converts a grok expression into a named regex expression
*
* @return named regex expression
*/
String toRegex(PatternBank patternBank, String grokPattern) {
StringBuilder res = new StringBuilder();
for (int i = 0; i < MAX_TO_REGEX_ITERATIONS; i++) {
byte[] grokPatternBytes = grokPattern.getBytes(StandardCharsets.UTF_8);
Matcher matcher = GROK_PATTERN_REGEX.matcher(grokPatternBytes);
int result;
try {
matcherWatchdog.register(matcher);
result = matcher.search(0, grokPatternBytes.length, Option.NONE);
} finally {
matcherWatchdog.unregister(matcher);
}
if (result < 0) {
return res.append(grokPattern).toString();
}
Region region = matcher.getEagerRegion();
String namedPatternRef = groupMatch(NAME_GROUP, region, grokPattern);
String subName = groupMatch(SUBNAME_GROUP, region, grokPattern);
// TODO(tal): Support definitions
@SuppressWarnings("unused")
String definition = groupMatch(DEFINITION_GROUP, region, grokPattern);
String patternName = groupMatch(PATTERN_GROUP, region, grokPattern);
String pattern = patternBank.get(patternName);
if (pattern == null) {
throw new IllegalArgumentException("Unable to find pattern [" + patternName + "] in Grok's pattern dictionary");
}
if (pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":")) {
throw new IllegalArgumentException("circular reference in pattern back [" + patternName + "]");
}
String grokPart;
if (namedCaptures && subName != null) {
grokPart = String.format(Locale.US, "(?<%s>%s)", namedPatternRef, pattern);
} else if (namedCaptures) {
grokPart = String.format(Locale.US, "(?:%s)", pattern);
} else {
grokPart = String.format(Locale.US, "(?<%s>%s)", patternName + "_" + result, pattern);
}
String start = new String(grokPatternBytes, 0, result, StandardCharsets.UTF_8);
String rest = new String(grokPatternBytes, region.end[0], grokPatternBytes.length - region.end[0], StandardCharsets.UTF_8);
grokPattern = grokPart + rest;
res.append(start);
}
throw new IllegalArgumentException("Can not convert grok patterns to regular expression");
}
/**
* Checks whether a specific text matches the defined grok expression.
*
* @param text the string to match
* @return true if grok expression matches text or there is a timeout, false otherwise.
*/
public boolean match(String text) {
Matcher matcher = compiledExpression.matcher(text.getBytes(StandardCharsets.UTF_8));
int result;
try {
matcherWatchdog.register(matcher);
result = matcher.search(0, text.length(), Option.DEFAULT);
} finally {
matcherWatchdog.unregister(matcher);
}
return (result != -1);
}
/**
* Matches and returns any named captures.
*
* @param text the text to match and extract values from.
* @return a map containing field names and their respective coerced values that matched or null if the pattern didn't match
*/
public Map<String, Object> captures(String text) {
return innerCaptures(text, cfg -> cfg::objectExtracter);
}
/**
* Matches and returns the ranges of any named captures.
*
* @param text the text to match and extract values from.
* @return a map containing field names and their respective ranges that matched or null if the pattern didn't match
*/
public Map<String, Object> captureRanges(String text) {
return innerCaptures(text, cfg -> cfg::rangeExtracter);
}
private Map<String, Object> innerCaptures(
String text,
Function<GrokCaptureConfig, Function<Consumer<Object>, GrokCaptureExtracter>> getExtracter
) {
byte[] utf8Bytes = text.getBytes(StandardCharsets.UTF_8);
GrokCaptureExtracter.MapExtracter extracter = new GrokCaptureExtracter.MapExtracter(captureConfig, getExtracter);
if (match(utf8Bytes, 0, utf8Bytes.length, extracter)) {
return extracter.result();
}
return null;
}
/**
* Matches and collects any named captures.
* @param utf8Bytes array containing the text to match against encoded in utf-8
* @param offset offset {@code utf8Bytes} of the start of the text
* @param length length of the text to match
* @param extracter collector for captures. {@link GrokCaptureConfig#nativeExtracter} can build these.
* @return true if there was a match, false otherwise
* @throws RuntimeException if there was a timeout
*/
public boolean match(byte[] utf8Bytes, int offset, int length, GrokCaptureExtracter extracter) {
Matcher matcher = compiledExpression.matcher(utf8Bytes, offset, offset + length);
int result;
try {
matcherWatchdog.register(matcher);
result = matcher.search(offset, offset + length, Option.DEFAULT);
} finally {
matcherWatchdog.unregister(matcher);
}
if (result == Matcher.INTERRUPTED) {
throw new RuntimeException(
"grok pattern matching was interrupted after [" + matcherWatchdog.maxExecutionTimeInMillis() + "] ms"
);
}
if (result == Matcher.FAILED) {
return false;
}
extracter.extract(utf8Bytes, offset, matcher.getEagerRegion());
return true;
}
/**
* The list of values that this {@linkplain Grok} can capture.
*/
public List<GrokCaptureConfig> captureConfig() {
return captureConfig;
}
public Regex getCompiledExpression() {
return compiledExpression;
}
}
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.health;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.xcontent.ChunkedToXContent;
import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.xcontent.ToXContent;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import static org.elasticsearch.health.HealthService.HEALTH_API_ID_PREFIX;
/**
* Details a potential issue that was diagnosed by a {@link HealthService}.
*
* @param definition The definition of the diagnosis (e.g. message, helpURL)
* @param affectedResources Optional list of "things" that are affected by this condition (e.g. shards, indices, or policies).
*/
public record Diagnosis(Definition definition, @Nullable List<Resource> affectedResources) implements ChunkedToXContent {

    /**
     * A group of resources of one {@link Type} affected by the diagnosed condition.
     * Node resources carry full {@link DiscoveryNode} instances; every other type is
     * represented by a collection of string identifiers.
     */
    public static class Resource implements ChunkedToXContent {

        public static final String ID_FIELD = "id";
        public static final String NAME_FIELD = "name";

        public enum Type {
            INDEX("indices"),
            NODE("nodes"),
            SLM_POLICY("slm_policies"),
            ILM_POLICY("ilm_policies"),
            FEATURE_STATE("feature_states"),
            SNAPSHOT_REPOSITORY("snapshot_repositories");

            // Field name used when rendering resources of this type to XContent.
            private final String displayValue;

            Type(String displayValue) {
                this.displayValue = displayValue;
            }
        }

        private final Type type;

        @Nullable
        private Collection<String> values;

        @Nullable
        private Collection<DiscoveryNode> nodes;

        public Resource(Type type, Collection<String> values) {
            if (type == Type.NODE) {
                throw new IllegalArgumentException("Nodes should be modelled using the dedicated constructor");
            }
            this.type = type;
            this.values = values;
        }

        public Resource(Collection<DiscoveryNode> nodes) {
            this.type = Type.NODE;
            this.nodes = nodes;
        }

        @Override
        public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerParams) {
            final Iterator<? extends ToXContent> entries;
            if (nodes == null) {
                // Plain resources render as an array of their string identifiers.
                entries = Iterators.map(values.iterator(), value -> (builder, params) -> builder.value(value));
            } else {
                // Node resources render as objects carrying the node id and, when present, its name.
                entries = Iterators.map(nodes.iterator(), node -> (builder, params) -> {
                    builder.startObject();
                    builder.field(ID_FIELD, node.getId());
                    if (node.getName() != null) {
                        builder.field(NAME_FIELD, node.getName());
                    }
                    return builder.endObject();
                });
            }
            return ChunkedToXContentHelper.array(type.displayValue, entries);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            Resource other = (Resource) o;
            return type == other.type && Objects.equals(values, other.values) && Objects.equals(nodes, other.nodes);
        }

        @Override
        public int hashCode() {
            return Objects.hash(type, values, nodes);
        }

        public Type getType() {
            return type;
        }

        @Nullable
        public Collection<String> getValues() {
            return values;
        }

        @Nullable
        public Collection<DiscoveryNode> getNodes() {
            return nodes;
        }
    }

    /**
     * Details a diagnosis - cause and a potential action that a user could take to clear an issue identified by a {@link HealthService}.
     *
     * @param indicatorName The name of the health indicator service that will generate this diagnosis
     * @param id An identifier unique to this diagnosis across the health indicator that generates it
     * @param cause A description of the cause of the problem
     * @param action A description of the action to be taken to remedy the problem
     * @param helpURL Optional evergreen url to a help document
     */
    public record Definition(String indicatorName, String id, String cause, String action, String helpURL) {
        public String getUniqueId() {
            return HEALTH_API_ID_PREFIX + indicatorName + ":diagnosis:" + id;
        }
    }

    @Override
    public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerParams) {
        // Middle fragments: one chunk stream per affected resource, or nothing at all.
        final Iterator<? extends ToXContent> resourceChunks = affectedResources == null
            ? Collections.emptyIterator()
            : Iterators.flatMap(affectedResources.iterator(), resource -> resource.toXContentChunked(outerParams));

        // Leading fragment: opens the diagnosis object and, when there are affected
        // resources, the nested "affected_resources" object the resource chunks stream into.
        final ToXContent leading = (builder, params) -> {
            builder.startObject();
            builder.field("id", definition.getUniqueId());
            builder.field("cause", definition.cause());
            builder.field("action", definition.action());
            builder.field("help_url", definition.helpURL());
            if (affectedResources != null && affectedResources.isEmpty() == false) {
                builder.startObject("affected_resources");
            }
            return builder;
        };

        // Trailing fragment: closes exactly what the leading fragment opened.
        final ToXContent trailing = (builder, params) -> {
            if (affectedResources != null && affectedResources.isEmpty() == false) {
                builder.endObject();
            }
            return builder.endObject();
        };

        return Iterators.concat(Iterators.single(leading), resourceChunks, Iterators.single(trailing));
    }
}
| elastic/elasticsearch | server/src/main/java/org/elasticsearch/health/Diagnosis.java |