id (int64, 22 to 34.9k) | comment_id (int64, 0 to 328) | comment (string, 2 to 2.55k chars) | code (string, 31 to 107k chars) | classification (string, 6 classes) | isFinished (bool, 1 class) | code_context_2 (string, 21 to 27.3k chars) | code_context_10 (string, 29 to 27.3k chars) | code_context_20 (string, 29 to 27.3k chars) |
---|---|---|---|---|---|---|---|---|
17,729 | 1 |
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
IMPLEMENTATION
| true |
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
17,729 | 2 |
// set the row and column in the correct location of the Sudoku Board
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
NONSATD
| true |
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
17,729 | 3 |
// set the layout params and add the EditText to the GridLayout parent
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
NONSATD
| true |
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
public void setParentAtRowAndColumn(GridLayout parent, int row, int col)
{
// prepare the layout parameters for the EditText
// TODO: Consider caching the layout params and only changing the spec row and spec column
LayoutParams layoutParams = new GridLayout.LayoutParams();
layoutParams.width = LayoutParams.WRAP_CONTENT;
layoutParams.height = LayoutParams.WRAP_CONTENT;
// set the row and column in the correct location of the Sudoku Board
layoutParams.rowSpec = GridLayout.spec(row);
layoutParams.columnSpec = GridLayout.spec(col);
// set the layout params and add the EditText to the GridLayout parent
_text.setLayoutParams(layoutParams);
parent.addView(_text);
}
|
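The TODO in the rows above (id 17,729) asks about caching the layout params and only changing the row and column specs. A minimal sketch of that idea, assuming the surrounding class holds the EditText in a `_text` field as in the snippet; the wrapper class `SudokuCell` and the field `_cachedLayoutParams` are hypothetical names introduced for illustration:

```java
import android.widget.EditText;
import android.widget.GridLayout;

class SudokuCell {                                        // hypothetical wrapper class
    private final EditText _text;                         // the cell's EditText, as in the snippet
    private GridLayout.LayoutParams _cachedLayoutParams;  // hypothetical cache field

    SudokuCell(EditText text) {
        _text = text;
    }

    public void setParentAtRowAndColumn(GridLayout parent, int row, int col) {
        if (_cachedLayoutParams == null) {
            // allocate once; width and height never change between calls
            _cachedLayoutParams = new GridLayout.LayoutParams();
            _cachedLayoutParams.width = GridLayout.LayoutParams.WRAP_CONTENT;
            _cachedLayoutParams.height = GridLayout.LayoutParams.WRAP_CONTENT;
        }
        // only the row and column specs are updated on each call
        _cachedLayoutParams.rowSpec = GridLayout.spec(row);
        _cachedLayoutParams.columnSpec = GridLayout.spec(col);
        _text.setLayoutParams(_cachedLayoutParams);
        parent.addView(_text);
    }
}
```

Since each cell owns its own EditText and params object, the allocation happens once per cell instead of once per call; reusing one LayoutParams instance across several views would not be safe.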
9,539 | 0 |
//TODO escape special characters
|
private void filter() {
String text = DrugMappingStringUtilities.safeToUpperCase(searchField.getText());
if (text.length() == 0) {
rowSorter.setRowFilter(null);
}
else {
//TODO escape special characters
rowSorter.setRowFilter(RowFilter.regexFilter(text));
}
if (rowSorter.getViewRowCount() == 0) {
ingredientMappingLogPanel.removeAll();
ingredientMappingResultPanel.removeAll();
mainFrame.getFrame().repaint();
}
if (ingredientsTable.getRowCount() > 0) {
ListSelectionModel selectionModel = ingredientsTable.getSelectionModel();
selectionModel.setSelectionInterval(0, 0);
}
}
|
IMPLEMENTATION
| true |
}
else {
//TODO escape special characters
rowSorter.setRowFilter(RowFilter.regexFilter(text));
}
|
private void filter() {
String text = DrugMappingStringUtilities.safeToUpperCase(searchField.getText());
if (text.length() == 0) {
rowSorter.setRowFilter(null);
}
else {
//TODO escape special characters
rowSorter.setRowFilter(RowFilter.regexFilter(text));
}
if (rowSorter.getViewRowCount() == 0) {
ingredientMappingLogPanel.removeAll();
ingredientMappingResultPanel.removeAll();
mainFrame.getFrame().repaint();
}
if (ingredientsTable.getRowCount() > 0) {
ListSelectionModel selectionModel = ingredientsTable.getSelectionModel();
selectionModel.setSelectionInterval(0, 0);
|
private void filter() {
String text = DrugMappingStringUtilities.safeToUpperCase(searchField.getText());
if (text.length() == 0) {
rowSorter.setRowFilter(null);
}
else {
//TODO escape special characters
rowSorter.setRowFilter(RowFilter.regexFilter(text));
}
if (rowSorter.getViewRowCount() == 0) {
ingredientMappingLogPanel.removeAll();
ingredientMappingResultPanel.removeAll();
mainFrame.getFrame().repaint();
}
if (ingredientsTable.getRowCount() > 0) {
ListSelectionModel selectionModel = ingredientsTable.getSelectionModel();
selectionModel.setSelectionInterval(0, 0);
}
}
|
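The TODO in the row above (id 9,539) flags that the raw search text is passed to RowFilter.regexFilter, so characters such as '(', '*' or '+' are parsed as regex syntax and can throw PatternSyntaxException. A minimal sketch of the escaping idea using Pattern.quote; the helper class `SearchFilters` is hypothetical:

```java
import java.util.regex.Pattern;
import javax.swing.RowFilter;

final class SearchFilters {                // hypothetical helper class
    private SearchFilters() {
    }

    /**
     * Builds a filter that matches the user's text literally: Pattern.quote wraps
     * the input in \Q...\E, so regex metacharacters no longer change the match or
     * cause a PatternSyntaxException.
     */
    static <M> RowFilter<M, Integer> literalFilter(String userText) {
        return RowFilter.regexFilter(Pattern.quote(userText));
    }
}
```

Inside the filter() method shown above, this would amount to calling rowSorter.setRowFilter(RowFilter.regexFilter(Pattern.quote(text))) in the else branch.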
34,120 | 0 |
// TODO: see if we can replace the words with icons in the future
|
private void setupViewPager(ViewPager viewPager) {
// TODO: see if we can replace the words with icons in the future
mSectionsPageAdapter.addFragment(new HomeFragment(), "Home");
mSectionsPageAdapter.addFragment(new SettingsFragment(), "Settings");
viewPager.setAdapter(mSectionsPageAdapter);
}
|
DESIGN
| true |
private void setupViewPager(ViewPager viewPager) {
// TODO: see if we can replace the words with icons in the future
mSectionsPageAdapter.addFragment(new HomeFragment(), "Home");
mSectionsPageAdapter.addFragment(new SettingsFragment(), "Settings");
|
private void setupViewPager(ViewPager viewPager) {
// TODO: see if we can replace the words with icons in the future
mSectionsPageAdapter.addFragment(new HomeFragment(), "Home");
mSectionsPageAdapter.addFragment(new SettingsFragment(), "Settings");
viewPager.setAdapter(mSectionsPageAdapter);
}
|
private void setupViewPager(ViewPager viewPager) {
// TODO: see if we can replace the words with icons in the future
mSectionsPageAdapter.addFragment(new HomeFragment(), "Home");
mSectionsPageAdapter.addFragment(new SettingsFragment(), "Settings");
viewPager.setAdapter(mSectionsPageAdapter);
}
|
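The DESIGN comment in the row above (id 34,120) proposes replacing the tab titles "Home" and "Settings" with icons. A sketch of one common approach, assuming the ViewPager is attached to a TabLayout (not shown in the snippet); the `tabLayout` parameter, the AndroidX/Material import paths, and the drawable resources are assumptions:

```java
import androidx.viewpager.widget.ViewPager;
import com.google.android.material.tabs.TabLayout;

private void setupViewPager(ViewPager viewPager, TabLayout tabLayout) {
    mSectionsPageAdapter.addFragment(new HomeFragment(), "Home");
    mSectionsPageAdapter.addFragment(new SettingsFragment(), "Settings");
    viewPager.setAdapter(mSectionsPageAdapter);
    tabLayout.setupWithViewPager(viewPager);

    // swap the text labels for icons; the drawable resources are hypothetical
    TabLayout.Tab homeTab = tabLayout.getTabAt(0);
    TabLayout.Tab settingsTab = tabLayout.getTabAt(1);
    if (homeTab != null) {
        homeTab.setIcon(R.drawable.ic_home);
    }
    if (settingsTab != null) {
        settingsTab.setIcon(R.drawable.ic_settings);
    }
}
```

Keeping setText(null) out and leaving the titles in place alongside the icons is also an option; the snippet itself does not show which the project would prefer.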
9,544 | 0 |
// The superclass uses getOffsetHeight, which won't work for us.
|
@Override
public int getPreferredHeight() {
// The superclass uses getOffsetHeight, which won't work for us.
return ComponentConstants.VIDEOPLAYER_PREFERRED_HEIGHT;
}
|
DEFECT
| true |
@Override
public int getPreferredHeight() {
// The superclass uses getOffsetHeight, which won't work for us.
return ComponentConstants.VIDEOPLAYER_PREFERRED_HEIGHT;
}
|
@Override
public int getPreferredHeight() {
// The superclass uses getOffsetHeight, which won't work for us.
return ComponentConstants.VIDEOPLAYER_PREFERRED_HEIGHT;
}
|
@Override
public int getPreferredHeight() {
// The superclass uses getOffsetHeight, which won't work for us.
return ComponentConstants.VIDEOPLAYER_PREFERRED_HEIGHT;
}
|
25,930 | 0 |
// TODO: copy header file inclusion
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
IMPLEMENTATION
| true |
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
|
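In the rows for id 25,930, cloneOnFileImpl rewrites each non-angled include by prepending one "../" per folder of the clone's relative path and then the original file's relative folder. A sketch of the same path arithmetic using java.nio.file, assuming both folder arguments are relative to the same source root (mirroring getRelativeFolderpath() in the snippet); the helper class `IncludePaths` is hypothetical:

```java
import java.nio.file.Path;
import java.nio.file.Paths;

final class IncludePaths {                 // hypothetical helper class
    private IncludePaths() {
    }

    /**
     * Rewrites a quoted include so it still resolves after the function is cloned
     * into another translation unit.
     */
    static String adjustInclude(String cloneFolder, String originalFolder, String include) {
        Path from = Paths.get(cloneFolder).normalize();   // folder of the file receiving the clone
        Path to = Paths.get(originalFolder).normalize();  // folder of the original file
        // relativize() produces the same "../.." hops plus relative folder that the
        // snippet builds by hand with its depth loop and baseIncludePath
        return from.relativize(to).resolve(include).toString().replace('\\', '/');
    }
}
```

For example, adjustInclude("gen/clones", "src/core", "util.h") yields "../../src/core/util.h", which is what the manual loop over relativeFolderDepth followed by the append of the original file's relative folder computes.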
25,930 | 1 |
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
|
25,930 | 2 |
/* copy headers from the current file to the file with the clone */
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
|
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
|
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
|
25,930 | 3 |
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
|
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
|
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
|
25,930 | 4 |
// Add as many ../ as folders in the relative folder
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
|
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
|
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
|
25,930 | 5 |
// Add relative folder of original file
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
|
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
|
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
|
25,930 | 6 |
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
|
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
|
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
25,930 | 7 |
// If angled, ignore
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
|
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
|
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
25,930 | 8 |
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
|
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
25,930 | 9 |
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
|
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
25,930 | 10 |
// Add includes
|
@Override
// TODO: copy header file inclusion
public AFunction cloneOnFileImpl(String newName, AFile file) {
// if (!function.hasBody()) {
// /*add the clone to the original place in order to be included where needed */
// return makeCloneAndInsert(newName, function, true);
// }
/* if this is a definition, add the clone to the correct file */
// App app = getRootImpl().getNode();
//
// Optional<TranslationUnit> file = app.getFile(fileName);
//
// if (!file.isPresent()) {
//
// TranslationUnit tu = getFactory().translationUnit(new File(fileName), Collections.emptyList());
//
// app.addFile(tu);
//
// file = Optional.of(tu);
// }
var tu = (TranslationUnit) file.getNode();
var cloneFunction = makeCloneAndInsert(newName, tu, true);
/* copy headers from the current file to the file with the clone */
TranslationUnit originalFile = function.getAncestorTry(TranslationUnit.class).orElse(null);
if (originalFile != null) {
var includesCopy = TreeNodeUtils.copy(originalFile.getIncludes().getIncludes());
// List<IncludeDecl> allIncludes = getIncludesCopyFromFile(originalFile);
File baseIncludePath = null;
// Add as many ../ as folders in the relative folder
var relativeFolderDepth = tu.getRelativeFolderpath().map(folder -> SpecsIo.getDepth(new File(folder)))
.orElse(0);
for (int i = 0; i < relativeFolderDepth; i++) {
baseIncludePath = new File(baseIncludePath, "../");
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
NONSATD
| true |
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
|
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
}
// Add relative folder of original file
var relativeDepth = baseIncludePath;
baseIncludePath = originalFile.getRelativeFolderpath()
.map(relativeFolder -> new File(relativeDepth, relativeFolder))
.orElse(baseIncludePath);
// System.out.println("BASE: " + baseIncludePath);
// System.out.println("DEPTH: " + relativeFolderDepth);
// Adapt includes
for (var includeDecl : includesCopy) {
var include = includeDecl.getInclude();
// If angled, ignore
if (include.isAngled()) {
continue;
}
// System.out.println("INCLUDE BEFORE: " + includeDecl.getCode());
var newInclude = include.setInclude(new File(baseIncludePath, include.getInclude()).toString());
includeDecl.set(IncludeDecl.INCLUDE, newInclude);
// System.out.println("INCLUDE AFTER: " + includeDecl.getCode());
}
// Add includes
includesCopy.stream().forEach(tu::addInclude);
}
return cloneFunction;
}
|
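The include-adaptation logic in the records above rebases every quoted include against the clone's new location: one "../" is prepended per folder in the target file's relative path, and the original file's relative folder is then appended before the include name. A minimal standalone sketch of that rebasing step, written against plain java.io.File with a hypothetical depth() helper standing in for SpecsIo.getDepth and the Clava AST types, might look like this:

import java.io.File;

public class IncludeRebaseSketch {

    // Hypothetical stand-in for SpecsIo.getDepth: counts the folders in a relative path.
    static int depth(String relativeFolder) {
        return relativeFolder.isEmpty() ? 0 : relativeFolder.split("[/\\\\]").length;
    }

    // Rebases a quoted include from the original file's folder to the clone's folder.
    static String rebaseInclude(String include, String cloneRelativeFolder, String originalRelativeFolder) {
        File base = null;
        // One "../" per folder separating the clone's file from the source root.
        for (int i = 0; i < depth(cloneRelativeFolder); i++) {
            base = new File(base, "..");
        }
        // Then descend into the folder that held the original file.
        if (!originalRelativeFolder.isEmpty()) {
            base = new File(base, originalRelativeFolder);
        }
        return new File(base, include).getPath();
    }

    public static void main(String[] args) {
        // Cloning from "src/util" into "gen/out": prints ../../src/util/helpers.h (separator is platform-specific).
        System.out.println(rebaseInclude("helpers.h", "gen/out", "src/util"));
    }
}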
9,553 | 0 |
//TODO: Add PID to move method
|
public void move(double inches, double power) {
robotInstance.drivetrain.povDrive(power, 0);
}
|
IMPLEMENTATION
| true |
public void move(double inches, double power) {
robotInstance.drivetrain.povDrive(power, 0);
}
|
public void move(double inches, double power) {
robotInstance.drivetrain.povDrive(power, 0);
}
|
public void move(double inches, double power) {
robotInstance.drivetrain.povDrive(power, 0);
}
|
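The record above leaves the distance argument unused and simply drives at a fixed power; its TODO asks for PID control. As an illustration only, a proportional-only loop toward a distance target can be simulated standalone (the gain, plant model, and variable names below are invented for the sketch, not part of the project; a full PID would add integral and derivative terms):

// Standalone simulation of proportional control toward a distance target.
public class PDriveSketch {
    public static void main(String[] args) {
        double targetInches = 24.0;   // distance the move() call would request
        double position = 0.0;        // simulated encoder reading, in inches
        double kP = 0.1;              // proportional gain; would need tuning on a real robot
        double maxPower = 0.6;        // cap corresponding to move()'s power argument
        for (int tick = 0; tick < 500 && Math.abs(targetInches - position) > 0.25; tick++) {
            double error = targetInches - position;
            // Clamp the proportional output; on the robot this value would feed povDrive(power, 0).
            double power = Math.max(-maxPower, Math.min(maxPower, kP * error));
            position += power * 0.5;  // crude plant model: half an inch per power unit per tick
        }
        System.out.printf("settled at %.2f inches%n", position);
    }
}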
25,939 | 0 |
/**
* Sets the new enum lists for this schema. The sets in the provided maps are converted into lists.
*
* @param enums The new enum sets for this schema.
*/
|
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
NONSATD
| true |
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
25,939 | 1 |
//Check that all the given keys are valid
|
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
NONSATD
| true |
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
|
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
|
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
25,939 | 2 |
//Todo check the type of the objects, for now just set them on the enum.
|
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
IMPLEMENTATION
| true |
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
|
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
|
public void setEnumsSet(Map<String, Set<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Object>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Object> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Object> valsList = Lists.newArrayList(vals);
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
25,940 | 0 |
/**
* Sets the new enum lists for this schema. The sets in the provided maps are converted into lists, and
* sorted according to their natural ordering.
*
* @param enums The new enum sets for this schema.
*/
|
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
List<Object> valsList = (List)valsListComparable;
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
NONSATD
| true |
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
List<Object> valsList = (List)valsListComparable;
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
List<Object> valsList = (List)valsListComparable;
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
List<Object> valsList = (List)valsListComparable;
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
25,940 | 1 |
//Check that all the given keys are valid
|
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
List<Object> valsList = (List)valsListComparable;
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
NONSATD
| true |
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
|
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
|
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
List<Object> valsList = (List)valsListComparable;
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
|
25,940 | 2 |
//Todo check the type of the objects, for now just set them on the enum.
|
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
List<Object> valsList = (List)valsListComparable;
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
IMPLEMENTATION
| true |
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
|
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
|
@SuppressWarnings({"rawtypes", "unchecked"})
public void setEnumsSetComparable(Map<String, Set<Comparable>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
Map<String, List<Object>> enumsList = Maps.newHashMap();
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, Set<Comparable>> entry : enums.entrySet()) {
String name = entry.getKey();
Set<Comparable> vals = entry.getValue();
Preconditions.checkNotNull(name);
Preconditions.checkNotNull(vals);
for (Object value : entry.getValue()) {
Preconditions.checkNotNull(value);
}
List<Comparable> valsListComparable = Lists.newArrayList(vals);
Collections.sort(valsListComparable);
List<Object> valsList = (List)valsListComparable;
enumsList.put(name, valsList);
}
currentEnumVals = Maps.newHashMap(enumsList);
}
|
25,941 | 0 |
/**
* Sets the new enum lists for this schema.
*
* @param enums The new enum lists for this schema.
*/
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
tempEnumValues.add(enumValue);
}
tempEnums.put(key, tempEnumValues);
}
currentEnumVals = tempEnums;
}
|
NONSATD
| true |
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
tempEnumValues.add(enumValue);
}
tempEnums.put(key, tempEnumValues);
}
currentEnumVals = tempEnums;
}
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
tempEnumValues.add(enumValue);
}
tempEnums.put(key, tempEnumValues);
}
currentEnumVals = tempEnums;
}
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
tempEnumValues.add(enumValue);
}
tempEnums.put(key, tempEnumValues);
}
currentEnumVals = tempEnums;
}
|
25,941 | 1 |
//Check that all the given keys are valid
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
tempEnumValues.add(enumValue);
}
tempEnums.put(key, tempEnumValues);
}
currentEnumVals = tempEnums;
}
|
NONSATD
| true |
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
tempEnumValues.add(enumValue);
}
tempEnums.put(key, tempEnumValues);
}
|
25,941 | 2 |
//Todo check the type of the objects, for now just set them on the enum.
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
tempEnumValues.add(enumValue);
}
tempEnums.put(key, tempEnumValues);
}
currentEnumVals = tempEnums;
}
|
IMPLEMENTATION
| true |
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
|
public void setEnumsList(Map<String, List<Object>> enums)
{
Preconditions.checkNotNull(enums);
areEnumsUpdated = true;
//Check that all the given keys are valid
Preconditions.checkArgument(
configurationSchema.getKeyDescriptor().getFields().getFields().containsAll(enums.keySet()),
"The given map doesn't contain valid keys. Valid keys are %s and the provided keys are %s",
configurationSchema.getKeyDescriptor().getFields().getFields(),
enums.keySet());
//Todo check the type of the objects, for now just set them on the enum.
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
Preconditions.checkNotNull(entry.getKey());
Preconditions.checkNotNull(entry.getValue());
}
Map<String, List<Object>> tempEnums = Maps.newHashMap();
for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
String key = entry.getKey();
List<?> enumValues = entry.getValue();
List<Object> tempEnumValues = Lists.newArrayList();
for (Object enumValue : enumValues) {
tempEnumValues.add(enumValue);
}
tempEnums.put(key, tempEnumValues);
}
currentEnumVals = tempEnums;
}
|
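All three enum-setter records above carry the same Todo: the incoming values are stored without checking that their runtime type matches the type declared for the key. A possible shape for that check, sketched against a plain map of expected classes rather than the real key-descriptor API (and with a plain exception instead of Guava's Preconditions), is:

import java.util.List;
import java.util.Map;

public class EnumTypeCheckSketch {

    // expectedTypes is a hypothetical per-key type lookup that the schema would provide.
    static void checkEnumValueTypes(Map<String, Class<?>> expectedTypes,
                                    Map<String, List<Object>> enums) {
        for (Map.Entry<String, List<Object>> entry : enums.entrySet()) {
            // Keys are assumed to be validated upstream, as the setters already do with checkArgument.
            Class<?> expected = expectedTypes.get(entry.getKey());
            for (Object value : entry.getValue()) {
                if (!expected.isInstance(value)) {
                    throw new IllegalArgumentException("Key " + entry.getKey() + " expects "
                        + expected.getSimpleName() + " but got " + value.getClass().getSimpleName());
                }
            }
        }
    }

    public static void main(String[] args) {
        // Passes: every value under "region" is a String, as declared.
        checkEnumValueTypes(Map.of("region", String.class), Map.of("region", List.of("NA", "EU")));
        System.out.println("enum values match their declared types");
    }
}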
17,766 | 0 |
/**
 * When the server receives a request with an unknown session_id it must recognize that as a request for a new session. When
 * the server opens a new session it must immediately send a frame containing a letter o.
 * </p>
 * Note: this test may periodically fail as we're relying on a multicore processor and non-blocking IO being
 * reliable. This isn't ideal as tests should be deterministic!
*
* @throws ExecutionException
* @throws InterruptedException
*/
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
TEST
| true |
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
17,766 | 1 |
// New line is a frame delimiter specific for xhr-polling"
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
NONSATD
| true |
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
|
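The record above documents the SockJS xhr-polling framing the test relies on: the open frame is "o", message frames start with "a" followed by a JSON array, close frames start with "c" followed by [code, reason], heartbeats are a single "h", and every frame ends with a newline delimiter. The following Java sketch only illustrates that framing; the enum and class names are invented and are not part of the tested server or of the test harness.

// Illustrative sketch: classify a polled SockJS xhr-polling frame by its leading character.
enum FrameType { OPEN, HEARTBEAT, MESSAGE, CLOSE, UNKNOWN }

final class SockJsFrames {
    static FrameType classify(String frame) {
        // Every xhr-polling frame is terminated by '\n'; strip the delimiter before inspecting it.
        String body = frame.endsWith("\n") ? frame.substring(0, frame.length() - 1) : frame;
        if (body.equals("o")) return FrameType.OPEN;          // session opened, e.g. "o\n"
        if (body.equals("h")) return FrameType.HEARTBEAT;     // keep-alive frame, a single 'h'
        if (body.startsWith("a[")) return FrameType.MESSAGE;  // JSON array of messages, e.g. a["a"]
        if (body.startsWith("c[")) return FrameType.CLOSE;    // close frame, e.g. c[2010,"..."]
        return FrameType.UNKNOWN;
    }
}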
17,766 | 2 |
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
NONSATD
| true |
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
|
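The comment above notes that the xhr_send endpoint accepts a batch of messages as a JSON-encoded list of strings (the test posts the literal ["a"]). A minimal sketch of building such a payload with org.json, which other code in this document already uses, is shown below; the helper name is made up.

// Hypothetical helper: encode a message batch as the JSON array of strings expected by xhr_send.
import java.util.List;
import org.json.JSONArray;

final class XhrSendPayloads {
    static String encode(List<String> messages) {
        JSONArray array = new JSONArray();
        for (String message : messages) {
            array.put(message);      // JSONArray quotes and escapes each string for us
        }
        return array.toString();     // e.g. ["a","b"] for a two-message batch
    }
}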
17,766 | 3 |
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
NONSATD
| true |
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
|
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
|
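The echo check above expects the posted message back inside an 'a' frame such as a["a"] followed by the newline delimiter. Below is a small sketch of pulling the message strings out of such a frame, again assuming the org.json dependency; the class and method names are illustrative only.

// Illustrative only: extract the message strings from a SockJS array frame like a["a"]\n.
import java.util.ArrayList;
import java.util.List;
import org.json.JSONArray;

final class ArrayFrames {
    static List<String> messages(String frame) {
        String body = frame.trim();                          // drop the trailing '\n' delimiter
        if (!body.startsWith("a[")) {
            throw new IllegalArgumentException("Not an array frame: " + frame);
        }
        JSONArray array = new JSONArray(body.substring(1));  // parse the JSON array after the 'a'
        List<String> result = new ArrayList<>();
        for (int i = 0; i < array.length(); i++) {
            result.add(array.getString(i));                  // each element is a plain string message
        }
        return result;
    }
}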
17,766 | 4 |
// Sending messages to not existing sessions is invalid.
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
NONSATD
| true |
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
|
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
|
17,766 | 5 |
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
DESIGN
| true |
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
|
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
|
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
|
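The DESIGN comment above says the server must not let two receiving connections wait on one session and must answer the newcomer with a close frame; the test expects c[2010,"Another connection still open"]. The sketch below shows one way such a guard could look on the server side. It is an assumption for illustration only, not the implementation under test.

// Hypothetical server-side guard: at most one receiving (polling) connection per session;
// a second attempt is answered with the close frame the test above checks for.
import java.util.concurrent.atomic.AtomicBoolean;

final class ReceiverGuard {
    static final String ANOTHER_CONNECTION_OPEN = "c[2010,\"Another connection still open\"]\n";

    private final AtomicBoolean receiverAttached = new AtomicBoolean(false);

    /** Returns null when the caller may start polling, or the close frame to send back. */
    String tryAttach() {
        return receiverAttached.compareAndSet(false, true) ? null : ANOTHER_CONNECTION_OPEN;
    }

    /** Called when the current polling request completes, freeing the slot. */
    void detach() {
        receiverAttached.set(false);
    }
}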
17,766 | 6 |
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
|
@Test
@RunAsClient
public void simpleSession() throws InterruptedException, ExecutionException {
final String uuid = uuid();
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
// New line is a frame delimiter specific for xhr-polling"
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("o\n", res.readEntity(String.class));
}
// After a session was established the server needs to accept requests for sending messages.
// Xhr-polling accepts messages as a list of JSON-encoded strings.
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"a\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
verifyEmptyEntity(res);
}
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
}
}
|
NONSATD
| true |
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
|
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
|
// We're using an echo service - we'll receive our message back. The message is encoded as an array 'a'.
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
assertEquals("a[\"a\"]\n", res.readEntity(String.class));
}
// Sending messages to not existing sessions is invalid.
try (ClosableResponse res = post(target("000", "bad_session", XHR_SEND), json("[\"a\"]"))) {
verify404(XHR_SEND, res);
}
// The session must time out after 5 seconds of not having a receiving connection. The server must send a
// heartbeat frame every 25 seconds. The heartbeat frame contains a single h character. This delay may be
// configurable.
// TODO
// The server must not allow two receiving connections to wait on a single session. In such case the server must
// send a close frame to the new connection.
for (int i = 0; i < 10; i++) {
try (ClosableResponse res = post(target("000", uuid, XHR_SEND), json("[\"xxxxxx\"]"))) {
assertEquals(Status.NO_CONTENT, res.getStatusInfo());
}
}
// Due to the time it takes for an async request to be scheduled it might actually be the one that returns the
// 'another connection still open' error. Therefore we need to check both.
final Future<Response> asyncFuture = target("000", uuid, XHR).request().async().post(json(null));
try (ClosableResponse res = post(target("000", uuid, XHR), json(null))) {
assertEquals(Status.OK, res.getStatusInfo());
final String resPayload = res.readEntity(String.class);
try (ClosableResponse asyncRes = closable(asyncFuture.get())) {
assertEquals(Status.OK, asyncRes.getStatusInfo());
final String asyncResPayload = asyncRes.readEntity(String.class);
if (ENABLE_CONCURRENT_REQUESTS_TEST) {
final String expectedError = "c[2010,\"Another connection still open\"]\n";
if (!expectedError.equals(resPayload) && !expectedError.equals(asyncResPayload)) {
fail("Neither response had '" + expectedError + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
final String expected = "a[\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\",\"xxxxxx\"]\n";
if (!expected.equals(resPayload) && !expected.equals(asyncResPayload)) {
fail("Neither response had '" + expected + "'! [blocking=" + resPayload + ",async=" + asyncResPayload + "]");
}
}
}
} finally {
asyncFuture.cancel(true);
|
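The comment above explains that scheduling jitter decides whether the blocking poll or the async poll receives the close frame, so the test accepts either ordering. That either-or assertion could be factored into a helper like the sketch below; the class name is invented and the import assumes the JUnit 4 Assert class (adjust it if the suite uses JUnit 5).

// Illustrative helper mirroring the either-or check above: at least one of the two payloads
// must equal the expected frame, otherwise the test fails with both payloads in the message.
import static org.junit.Assert.fail;

final class EitherAssert {
    static void assertEitherEquals(String expected, String blocking, String async) {
        if (!expected.equals(blocking) && !expected.equals(async)) {
            fail("Neither response had '" + expected + "'! [blocking=" + blocking + ",async=" + async + "]");
        }
    }
}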
25,966 | 0 |
// FIXME: this is the case of blendModes and scaleModes
|
private void buildConstant(JSONObject obj, Map<String, PhaserType> typeMap) {
if (obj.getString("kind").equals("constant")) {
String name = obj.getString("name");
String desc = obj.optString("description", "");
Object defaultValue = obj.opt("defaultvalue");
String[] types;
if (obj.has("type")) {
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
}
} else {
PhaserType type = typeMap.get(memberof);
if (!type.getMemberMap().containsKey(name)) {
type.getMemberMap().put(name, cons);
cons.setDeclType(type);
buildMeta(cons, obj);
}
}
}
}
|
DESIGN
| true |
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
|
private void buildConstant(JSONObject obj, Map<String, PhaserType> typeMap) {
if (obj.getString("kind").equals("constant")) {
String name = obj.getString("name");
String desc = obj.optString("description", "");
Object defaultValue = obj.opt("defaultvalue");
String[] types;
if (obj.has("type")) {
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
|
private void buildConstant(JSONObject obj, Map<String, PhaserType> typeMap) {
if (obj.getString("kind").equals("constant")) {
String name = obj.getString("name");
String desc = obj.optString("description", "");
Object defaultValue = obj.opt("defaultvalue");
String[] types;
if (obj.has("type")) {
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
|
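The FIXME above flags that some members (blendModes, scaleModes) arrive without a "type" entry and are silently typed as Object. One way to keep that fallback in a single place is a small helper like the sketch below; it reuses the org.json calls already present in buildConstant, but the helper itself is only an illustrative refactoring, not existing project code.

// Hypothetical refactoring sketch: centralize the "missing type" fallback flagged by the FIXME.
import org.json.JSONArray;
import org.json.JSONObject;

final class JsdocTypes {
    static String[] typeNames(JSONObject member) {
        if (member.has("type")) {
            JSONArray names = member.getJSONObject("type").getJSONArray("names");
            String[] types = new String[names.length()];
            for (int i = 0; i < names.length(); i++) {
                types[i] = names.getString(i);
            }
            return types;
        }
        // blendModes / scaleModes style constants carry no type information; fall back to Object.
        return new String[] { "Object" };
    }
}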
25,966 | 1 |
// static flag
|
private void buildConstant(JSONObject obj, Map<String, PhaserType> typeMap) {
if (obj.getString("kind").equals("constant")) {
String name = obj.getString("name");
String desc = obj.optString("description", "");
Object defaultValue = obj.opt("defaultvalue");
String[] types;
if (obj.has("type")) {
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
}
} else {
PhaserType type = typeMap.get(memberof);
if (!type.getMemberMap().containsKey(name)) {
type.getMemberMap().put(name, cons);
cons.setDeclType(type);
buildMeta(cons, obj);
}
}
}
}
|
NONSATD
| true |
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
|
String[] types;
if (obj.has("type")) {
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
|
private void buildConstant(JSONObject obj, Map<String, PhaserType> typeMap) {
if (obj.getString("kind").equals("constant")) {
String name = obj.getString("name");
String desc = obj.optString("description", "");
Object defaultValue = obj.opt("defaultvalue");
String[] types;
if (obj.has("type")) {
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
|
25,966 | 2 |
// global constant
|
private void buildConstant(JSONObject obj, Map<String, PhaserType> typeMap) {
if (obj.getString("kind").equals("constant")) {
String name = obj.getString("name");
String desc = obj.optString("description", "");
Object defaultValue = obj.opt("defaultvalue");
String[] types;
if (obj.has("type")) {
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
}
} else {
PhaserType type = typeMap.get(memberof);
if (!type.getMemberMap().containsKey(name)) {
type.getMemberMap().put(name, cons);
cons.setDeclType(type);
buildMeta(cons, obj);
}
}
}
}
|
NONSATD
| true |
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
|
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
}
} else {
|
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
}
} else {
PhaserType type = typeMap.get(memberof);
if (!type.getMemberMap().containsKey(name)) {
type.getMemberMap().put(name, cons);
cons.setDeclType(type);
buildMeta(cons, obj);
}
}
}
}
|
25,966 | 3 |
// FIXME: only add those Phaser.js constants
|
private void buildConstant(JSONObject obj, Map<String, PhaserType> typeMap) {
if (obj.getString("kind").equals("constant")) {
String name = obj.getString("name");
String desc = obj.optString("description", "");
Object defaultValue = obj.opt("defaultvalue");
String[] types;
if (obj.has("type")) {
JSONArray jsonTypes = obj.getJSONObject("type").getJSONArray("names");
types = getStringArray(jsonTypes);
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
}
} else {
PhaserType type = typeMap.get(memberof);
if (!type.getMemberMap().containsKey(name)) {
type.getMemberMap().put(name, cons);
cons.setDeclType(type);
buildMeta(cons, obj);
}
}
}
}
|
DEFECT
| true |
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
|
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
}
} else {
PhaserType type = typeMap.get(memberof);
if (!type.getMemberMap().containsKey(name)) {
|
} else {
// FIXME: this is the case of blendModes and scaleModes
types = new String[] { "Object" };
}
PhaserConstant cons = new PhaserConstant();
{
// static flag
String scope = obj.optString("scope", "");
if (scope.equals("static")) {
cons.setStatic(true);
}
}
cons.setName(name);
cons.setHelp(desc);
cons.setTypes(types);
cons.setDefaultValue(defaultValue);
String memberof = obj.optString("memberof", null);
if (memberof == null) {
// global constant
buildMeta(cons, obj);
// FIXME: only add those Phaser.js constants
if (cons.getFile().getFileName().toString().equals("Phaser.js")) {
_globalConstants.add(cons);
} else {
out.println(obj.toString(2));
throw new IllegalArgumentException("All global constants should come from Phaser.js and not from "
+ cons.getFile().getFileName() + "#" + cons.getName());
}
} else {
PhaserType type = typeMap.get(memberof);
if (!type.getMemberMap().containsKey(name)) {
type.getMemberMap().put(name, cons);
cons.setDeclType(type);
buildMeta(cons, obj);
}
}
}
}
|
17,777 | 0 |
/**
* Returns the pressed Button of the alert.
 * That can be used in a condition, e.g.:
 * showAlert(AlertType.CONFIRMATION, "OK?", "Is it ok?", true) == ButtonType.OK
*
* @param type the alert type
* @param title the title of the alert
* @param text the text of the alert
* @param onTop the alert "modality"; true if the alert should be always
* on top, false otherwise
* @return the button type (depends on the alert type)
*/
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
NONSATD
| true |
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
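The javadoc above describes using the returned ButtonType in a condition. A usage sketch of that idiom, written as if it were added to the same utility class so it can call showAlert directly, follows.

// Usage sketch, assuming this method sits in the same utility class as showAlert.
static boolean confirm() {
    // true only when the user pressed OK; ESC or closing the window yields ButtonType.CANCEL
    return showAlert(AlertType.CONFIRMATION, "OK?", "Is it ok?", true) == ButtonType.OK;
}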
17,777 | 1 |
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
DESIGN
| true |
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
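The NOTE and HACK records explain that the alert is recreated on every call and forced always-on-top because a Swing frame cannot own a JavaFX alert. Since showAndWait() may only run on the JavaFX Application Thread, a caller on the Swing EDT still has to hop threads. The sketch below shows one common bridge (FutureTask plus Platform.runLater); it is an assumption about how callers might invoke the utility, not code from this project.

// Hypothetical addition to the same utility class as showAlert (requires imports for
// java.util.concurrent.FutureTask, java.util.concurrent.ExecutionException and javafx.application.Platform).
// Runs showAlert on the JavaFX Application Thread and blocks the calling (e.g. Swing EDT) thread
// until the user picks a button. Must not itself be called on the FX Application Thread, or it deadlocks.
public static ButtonType showAlertFromSwing(AlertType type, String title, String text, boolean onTop)
        throws InterruptedException, ExecutionException {
    FutureTask<ButtonType> task = new FutureTask<>(() -> showAlert(type, title, text, onTop));
    Platform.runLater(task);   // showAndWait() inside showAlert is only legal on the FX thread
    return task.get();         // wait off the FX thread for the user's choice
}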
17,777 | 2 |
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
NONSATD
| true |
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
17,777 | 3 |
// if no button was pressed, the dialog got canceled (ESC, close)
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
NONSATD
| true |
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
public static ButtonType showAlert(AlertType type, String title, String text, boolean onTop) {
// NOTE: alert must be (re-)created everytime, otherwise the following HACK doesn't work!
Alert alert = new Alert(AlertType.NONE);
alert.setAlertType(type);
alert.setTitle(title);
alert.setHeaderText(null);
alert.setContentText(text);
// HACK: since it is not possible to set the owner of an javafx alert to
// a swing frame, we use the following approach to set the modality!
((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
// if no button was pressed, the dialog got canceled (ESC, close)
return alert.showAndWait().orElse(ButtonType.CANCEL);
}
|
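The always-on-top workaround above only covers modality; when the caller lives in a Swing frame there is also a threading constraint, because JavaFX dialogs must be built and shown on the FX Application Thread. Below is a minimal sketch of how a Swing caller might hand the dialog off to that thread and get the pressed button back; the class and method names are assumptions, not part of the quoted project.

import javafx.application.Platform;
import javafx.embed.swing.JFXPanel;
import javafx.scene.control.Alert;
import javafx.scene.control.Alert.AlertType;
import javafx.scene.control.ButtonType;
import javafx.stage.Stage;

import java.util.concurrent.CompletableFuture;

public final class SwingFxAlerts {

    static {
        new JFXPanel();                  // bootstraps the JavaFX runtime inside a Swing app
        Platform.setImplicitExit(false); // keep the FX toolkit alive between dialogs
    }

    /** Callable from the Swing EDT; completes with the button the user pressed. */
    public static CompletableFuture<ButtonType> showFromSwing(
            AlertType type, String title, String text, boolean onTop) {
        CompletableFuture<ButtonType> pressed = new CompletableFuture<>();
        Platform.runLater(() -> {
            Alert alert = new Alert(type);
            alert.setTitle(title);
            alert.setHeaderText(null);
            alert.setContentText(text);
            // same always-on-top workaround as in the record above
            ((Stage) alert.getDialogPane().getScene().getWindow()).setAlwaysOnTop(onTop);
            pressed.complete(alert.showAndWait().orElse(ButtonType.CANCEL));
        });
        return pressed;
    }
}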
1,396 | 0 |
/**
* Sets a costmap grid and fills it with the geometry polygons.
*/
// TODO(playerone) remove sync
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
DESIGN
| true |
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
1,396 | 1 |
// Get the grid boundaries to determine the size of the cost map.
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
NONSATD
| true |
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
|
1,396 | 2 |
// Create the grid.
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
NONSATD
| true |
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
|
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
1,396 | 3 |
// DiscretizedVertices contain only X and Y.
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
NONSATD
| true |
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
|
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
1,396 | 4 |
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
|
private synchronized void fillCostMapWithGeometries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
setValid(false);
return;
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
DESIGN
| true |
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
|
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
}
// Get the grid boundaries to determine the size of the cost map.
double[][] positions = findGeometriesBoundaries();
if (positions == null) {
Log.wtf(TAG, "Could not find grid boundaries");
setValid(false);
return;
}
// Create the grid.
makeGrid(positions, MIN_COST);
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
// DiscretizedVertices contain only X and Y.
int[][] discretizedVertices = new int[size][2];
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
discretizedVertices[i][0] = (int) Math.floor(pointPosition[0] / getResolution());
discretizedVertices[i][1] = (int) Math.floor(pointPosition[1] / getResolution());
}
// TODO mCost is not used, function below sets polygon regions to OBSTACLE_COST.
drawPolygonOnGrid(discretizedVertices);
}
}
|
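drawPolygonOnGrid itself is not shown in the record. A common way to realize it, given the discretized vertices built above, is to scan the polygon's bounding box and mark every cell whose center passes an even-odd (ray casting) point-in-polygon test. The sketch below assumes a row-major int[][] grid and an OBSTACLE_COST-style value passed in as obstacleCost; the fill strategy is an illustration, not the project's actual implementation.

final class PolygonRasterizer {

    /** Marks every grid cell inside the polygon (row-major grid[y][x]) with obstacleCost. */
    static void rasterizePolygon(int[][] verts, int[][] grid, int obstacleCost) {
        int minX = Integer.MAX_VALUE, maxX = Integer.MIN_VALUE;
        int minY = Integer.MAX_VALUE, maxY = Integer.MIN_VALUE;
        for (int[] v : verts) {
            minX = Math.min(minX, v[0]); maxX = Math.max(maxX, v[0]);
            minY = Math.min(minY, v[1]); maxY = Math.max(maxY, v[1]);
        }
        for (int y = Math.max(0, minY); y <= Math.min(grid.length - 1, maxY); y++) {
            for (int x = Math.max(0, minX); x <= Math.min(grid[y].length - 1, maxX); x++) {
                if (insidePolygon(verts, x, y)) {
                    grid[y][x] = obstacleCost;
                }
            }
        }
    }

    /** Classic even-odd (ray casting) point-in-polygon test on integer vertices. */
    static boolean insidePolygon(int[][] verts, int x, int y) {
        boolean inside = false;
        for (int i = 0, j = verts.length - 1; i < verts.length; j = i++) {
            int xi = verts[i][0], yi = verts[i][1];
            int xj = verts[j][0], yj = verts[j][1];
            if ((yi > y) != (yj > y)
                    && x < (double) (xj - xi) * (y - yi) / (yj - yi) + xi) {
                inside = !inside;
            }
        }
        return inside;
    }
}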
1,397 | 0 |
/**
* Finds the boundaries of all the geometry polygons in X and Y.
* The boundaries are extended by robot radius in each direction.
*
* @return array of doubles {{minX, minY}}, {{maxX, maxY}}.
*/
// TODO(playerone) remove sync
|
private synchronized double[][] findGeometriesBoundaries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
return null;
}
double minX = Double.MAX_VALUE;
double maxX = -Double.MAX_VALUE;
double minY = Double.MAX_VALUE;
double maxY = -Double.MAX_VALUE;
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
minX = Math.min(minX, pointPosition[0]);
maxX = Math.max(maxX, pointPosition[0]);
minY = Math.min(minY, pointPosition[1]);
maxY = Math.max(maxY, pointPosition[1]);
}
}
return new double[][]{{minX - mRobotRadius, minY - mRobotRadius},
{maxX + mRobotRadius, maxY + mRobotRadius}};
}
|
DESIGN
| true |
private synchronized double[][] findGeometriesBoundaries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
return null;
}
double minX = Double.MAX_VALUE;
double maxX = -Double.MAX_VALUE;
double minY = Double.MAX_VALUE;
double maxY = -Double.MAX_VALUE;
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
minX = Math.min(minX, pointPosition[0]);
maxX = Math.max(maxX, pointPosition[0]);
minY = Math.min(minY, pointPosition[1]);
maxY = Math.max(maxY, pointPosition[1]);
}
}
return new double[][]{{minX - mRobotRadius, minY - mRobotRadius},
{maxX + mRobotRadius, maxY + mRobotRadius}};
}
|
private synchronized double[][] findGeometriesBoundaries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
return null;
}
double minX = Double.MAX_VALUE;
double maxX = -Double.MAX_VALUE;
double minY = Double.MAX_VALUE;
double maxY = -Double.MAX_VALUE;
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
minX = Math.min(minX, pointPosition[0]);
maxX = Math.max(maxX, pointPosition[0]);
minY = Math.min(minY, pointPosition[1]);
maxY = Math.max(maxY, pointPosition[1]);
}
}
return new double[][]{{minX - mRobotRadius, minY - mRobotRadius},
{maxX + mRobotRadius, maxY + mRobotRadius}};
}
|
private synchronized double[][] findGeometriesBoundaries() {
if (mGeometryList.isEmpty()) {
Log.wtf(TAG, "Empty geometries list");
return null;
}
double minX = Double.MAX_VALUE;
double maxX = -Double.MAX_VALUE;
double minY = Double.MAX_VALUE;
double maxY = -Double.MAX_VALUE;
for (Geometry g : mGeometryList) {
int size = g.mPolygon.getSize();
List<Transform> points = g.mPolygon.getPoints();
for (int i = 0; i < size; i++) {
double[] pointPosition = points.get(i).getPosition();
minX = Math.min(minX, pointPosition[0]);
maxX = Math.max(maxX, pointPosition[0]);
minY = Math.min(minY, pointPosition[1]);
maxY = Math.max(maxY, pointPosition[1]);
}
}
return new double[][]{{minX - mRobotRadius, minY - mRobotRadius},
{maxX + mRobotRadius, maxY + mRobotRadius}};
}
|
9,593 | 0 |
// Maybe we could support concurrent some time in the future
|
@Override
public boolean isConcurrentAccessSupported()
{
// Maybe we could support concurrent some time in the future
return false;
}
|
IMPLEMENTATION
| true |
public boolean isConcurrentAccessSupported()
{
// Maybe we could support concurrent some time in the future
return false;
}
|
@Override
public boolean isConcurrentAccessSupported()
{
// Maybe we could support concurrent some time in the future
return false;
}
|
@Override
public boolean isConcurrentAccessSupported()
{
// Maybe we could support concurrent some time in the future
return false;
}
|
1,407 | 0 |
/**
* If the currentBufferIndex is less than the buffer size - 1,
* it means, the next buffer in the list has been freed up for
* rewriting. Reuse the next available buffer in such cases.
*
* In case, the currentBufferIndex == buffer.size and buffer size is still
* less than the capacity to be allocated, just allocate a buffer of size
* chunk size.
*
*/
|
public ByteBuffer allocateBufferIfNeeded() {
ByteBuffer buffer = getCurrentBuffer();
if (buffer != null && buffer.hasRemaining()) {
return buffer;
}
if (currentBufferIndex < bufferList.size() - 1) {
buffer = getBuffer(currentBufferIndex + 1);
} else {
buffer = ByteBuffer.allocate(bufferSize);
bufferList.add(buffer);
}
Preconditions.checkArgument(bufferList.size() <= capacity);
currentBufferIndex++;
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
return buffer;
}
|
NONSATD
| true |
public ByteBuffer allocateBufferIfNeeded() {
ByteBuffer buffer = getCurrentBuffer();
if (buffer != null && buffer.hasRemaining()) {
return buffer;
}
if (currentBufferIndex < bufferList.size() - 1) {
buffer = getBuffer(currentBufferIndex + 1);
} else {
buffer = ByteBuffer.allocate(bufferSize);
bufferList.add(buffer);
}
Preconditions.checkArgument(bufferList.size() <= capacity);
currentBufferIndex++;
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
return buffer;
}
|
public ByteBuffer allocateBufferIfNeeded() {
ByteBuffer buffer = getCurrentBuffer();
if (buffer != null && buffer.hasRemaining()) {
return buffer;
}
if (currentBufferIndex < bufferList.size() - 1) {
buffer = getBuffer(currentBufferIndex + 1);
} else {
buffer = ByteBuffer.allocate(bufferSize);
bufferList.add(buffer);
}
Preconditions.checkArgument(bufferList.size() <= capacity);
currentBufferIndex++;
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
return buffer;
}
|
public ByteBuffer allocateBufferIfNeeded() {
ByteBuffer buffer = getCurrentBuffer();
if (buffer != null && buffer.hasRemaining()) {
return buffer;
}
if (currentBufferIndex < bufferList.size() - 1) {
buffer = getBuffer(currentBufferIndex + 1);
} else {
buffer = ByteBuffer.allocate(bufferSize);
bufferList.add(buffer);
}
Preconditions.checkArgument(bufferList.size() <= capacity);
currentBufferIndex++;
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
return buffer;
}
|
1,407 | 1 |
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
|
public ByteBuffer allocateBufferIfNeeded() {
ByteBuffer buffer = getCurrentBuffer();
if (buffer != null && buffer.hasRemaining()) {
return buffer;
}
if (currentBufferIndex < bufferList.size() - 1) {
buffer = getBuffer(currentBufferIndex + 1);
} else {
buffer = ByteBuffer.allocate(bufferSize);
bufferList.add(buffer);
}
Preconditions.checkArgument(bufferList.size() <= capacity);
currentBufferIndex++;
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
return buffer;
}
|
DESIGN
| true |
Preconditions.checkArgument(bufferList.size() <= capacity);
currentBufferIndex++;
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
return buffer;
}
|
return buffer;
}
if (currentBufferIndex < bufferList.size() - 1) {
buffer = getBuffer(currentBufferIndex + 1);
} else {
buffer = ByteBuffer.allocate(bufferSize);
bufferList.add(buffer);
}
Preconditions.checkArgument(bufferList.size() <= capacity);
currentBufferIndex++;
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
return buffer;
}
|
public ByteBuffer allocateBufferIfNeeded() {
ByteBuffer buffer = getCurrentBuffer();
if (buffer != null && buffer.hasRemaining()) {
return buffer;
}
if (currentBufferIndex < bufferList.size() - 1) {
buffer = getBuffer(currentBufferIndex + 1);
} else {
buffer = ByteBuffer.allocate(bufferSize);
bufferList.add(buffer);
}
Preconditions.checkArgument(bufferList.size() <= capacity);
currentBufferIndex++;
// TODO: Turn the below precondition check on when Standalone pipeline
// is removed in the write path in tests
// Preconditions.checkArgument(buffer.position() == 0);
return buffer;
}
|
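A minimal sketch of the chunked-write pattern the method above enables: keep writing into the current fixed-size buffer while it has room, and only move to (or allocate) another chunk when it fills up. The helper names here are stand-ins for the real class, which additionally recycles freed buffers and enforces a capacity limit.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

final class ChunkedWriteSketch {

    /** Streams payload into fixed-size chunks, allocating a new one only when the last is full. */
    static List<ByteBuffer> writeChunked(byte[] payload, int chunkSize) {
        List<ByteBuffer> chunks = new ArrayList<>();
        int written = 0;
        while (written < payload.length) {
            ByteBuffer current =
                    chunks.isEmpty() || !chunks.get(chunks.size() - 1).hasRemaining()
                            ? allocate(chunks, chunkSize)        // "allocateBufferIfNeeded"
                            : chunks.get(chunks.size() - 1);
            int len = Math.min(current.remaining(), payload.length - written);
            current.put(payload, written, len);
            written += len;
        }
        return chunks;
    }

    private static ByteBuffer allocate(List<ByteBuffer> chunks, int chunkSize) {
        ByteBuffer fresh = ByteBuffer.allocate(chunkSize);
        chunks.add(fresh);
        return fresh;
    }
}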
9,601 | 0 |
//TODO:Until the listener is set, content writing happens in I/O thread. If writability changed
//while in I/O thread and DefaultBackPressureListener is engaged, there's a chance of I/O thread
//getting blocked. Cannot recreate, only a possibility.
|
public void writeContent(HttpCarbonMessage httpOutboundRequest) {
if (handlerExecutor != null) {
handlerExecutor.executeAtTargetRequestReceiving(httpOutboundRequest);
}
BackPressureHandler backpressureHandler = Util.getBackPressureHandler(targetHandler.getContext());
Util.setBackPressureListener(httpOutboundRequest, backpressureHandler, httpOutboundRequest.getSourceContext());
resetTargetChannelState();
httpOutboundRequest.getHttpContentAsync().setMessageListener((httpContent -> {
//TODO:Until the listener is set, content writing happens in I/O thread. If writability changed
//while in I/O thread and DefaultBackPressureListener is engaged, there's a chance of I/O thread
//getting blocked. Cannot recreate, only a possibility.
Util.checkUnWritabilityAndNotify(targetHandler.getContext(), backpressureHandler);
this.channel.eventLoop().execute(() -> {
try {
senderReqRespStateManager.writeOutboundRequestEntity(httpOutboundRequest, httpContent);
} catch (Exception exception) {
String errorMsg = "Failed to send the request : "
+ exception.getMessage().toLowerCase(Locale.ENGLISH);
LOG.error(errorMsg, exception);
this.targetHandler.getHttpResponseFuture().notifyHttpListener(exception);
}
});
}));
}
|
DEFECT
| true |
resetTargetChannelState();
httpOutboundRequest.getHttpContentAsync().setMessageListener((httpContent -> {
//TODO:Until the listener is set, content writing happens in I/O thread. If writability changed
//while in I/O thread and DefaultBackPressureListener is engaged, there's a chance of I/O thread
//getting blocked. Cannot recreate, only a possibility.
Util.checkUnWritabilityAndNotify(targetHandler.getContext(), backpressureHandler);
this.channel.eventLoop().execute(() -> {
|
public void writeContent(HttpCarbonMessage httpOutboundRequest) {
if (handlerExecutor != null) {
handlerExecutor.executeAtTargetRequestReceiving(httpOutboundRequest);
}
BackPressureHandler backpressureHandler = Util.getBackPressureHandler(targetHandler.getContext());
Util.setBackPressureListener(httpOutboundRequest, backpressureHandler, httpOutboundRequest.getSourceContext());
resetTargetChannelState();
httpOutboundRequest.getHttpContentAsync().setMessageListener((httpContent -> {
//TODO:Until the listener is set, content writing happens in I/O thread. If writability changed
//while in I/O thread and DefaultBackPressureListener is engaged, there's a chance of I/O thread
//getting blocked. Cannot recreate, only a possibility.
Util.checkUnWritabilityAndNotify(targetHandler.getContext(), backpressureHandler);
this.channel.eventLoop().execute(() -> {
try {
senderReqRespStateManager.writeOutboundRequestEntity(httpOutboundRequest, httpContent);
} catch (Exception exception) {
String errorMsg = "Failed to send the request : "
+ exception.getMessage().toLowerCase(Locale.ENGLISH);
LOG.error(errorMsg, exception);
this.targetHandler.getHttpResponseFuture().notifyHttpListener(exception);
}
|
public void writeContent(HttpCarbonMessage httpOutboundRequest) {
if (handlerExecutor != null) {
handlerExecutor.executeAtTargetRequestReceiving(httpOutboundRequest);
}
BackPressureHandler backpressureHandler = Util.getBackPressureHandler(targetHandler.getContext());
Util.setBackPressureListener(httpOutboundRequest, backpressureHandler, httpOutboundRequest.getSourceContext());
resetTargetChannelState();
httpOutboundRequest.getHttpContentAsync().setMessageListener((httpContent -> {
//TODO:Until the listener is set, content writing happens in I/O thread. If writability changed
//while in I/O thread and DefaultBackPressureListener is engaged, there's a chance of I/O thread
//getting blocked. Cannot recreate, only a possibility.
Util.checkUnWritabilityAndNotify(targetHandler.getContext(), backpressureHandler);
this.channel.eventLoop().execute(() -> {
try {
senderReqRespStateManager.writeOutboundRequestEntity(httpOutboundRequest, httpContent);
} catch (Exception exception) {
String errorMsg = "Failed to send the request : "
+ exception.getMessage().toLowerCase(Locale.ENGLISH);
LOG.error(errorMsg, exception);
this.targetHandler.getHttpResponseFuture().notifyHttpListener(exception);
}
});
}));
}
|
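The concern in the TODO above is generic to event-loop I/O: a back-pressure listener that blocks until the channel becomes writable must never run on the channel's own event loop, because the flush that would restore writability is processed by that same thread. A hedged, generic sketch of the guard follows; it is not the project's Util.checkUnWritabilityAndNotify, and a deliberately crude sleep-poll stands in for a proper writability notification.

import io.netty.channel.Channel;

final class BackPressureGuard {

    /** Blocks the calling (non-I/O) thread until the channel drains; no-op on the event loop. */
    static void awaitWritable(Channel channel) throws InterruptedException {
        if (channel.eventLoop().inEventLoop()) {
            // Blocking here would deadlock: the write that restores writability is handled
            // by this very thread, so bail out (or reschedule) instead.
            return;
        }
        while (!channel.isWritable()) {
            Thread.sleep(1);   // crude stand-in for parking until channelWritabilityChanged() fires
        }
    }
}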
25,987 | 0 |
// TODO: Deep clone is more correct, but probably does not matter in practice
|
@Override
public QueryProfileVariant clone() {
if (frozen) return this;
try {
QueryProfileVariant clone = (QueryProfileVariant)super.clone();
if (this.inherited != null)
clone.inherited = new ArrayList<>(this.inherited); // TODO: Deep clone is more correct, but probably does not matter in practice
clone.values = CopyOnWriteContent.deepClone(this.values);
return clone;
}
catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
}
}
|
DESIGN
| true |
QueryProfileVariant clone = (QueryProfileVariant)super.clone();
if (this.inherited != null)
clone.inherited = new ArrayList<>(this.inherited); // TODO: Deep clone is more correct, but probably does not matter in practice
clone.values = CopyOnWriteContent.deepClone(this.values);
return clone;
|
@Override
public QueryProfileVariant clone() {
if (frozen) return this;
try {
QueryProfileVariant clone = (QueryProfileVariant)super.clone();
if (this.inherited != null)
clone.inherited = new ArrayList<>(this.inherited); // TODO: Deep clone is more correct, but probably does not matter in practice
clone.values = CopyOnWriteContent.deepClone(this.values);
return clone;
}
catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
}
}
|
@Override
public QueryProfileVariant clone() {
if (frozen) return this;
try {
QueryProfileVariant clone = (QueryProfileVariant)super.clone();
if (this.inherited != null)
clone.inherited = new ArrayList<>(this.inherited); // TODO: Deep clone is more correct, but probably does not matter in practice
clone.values = CopyOnWriteContent.deepClone(this.values);
return clone;
}
catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
}
}
|
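A tiny illustration of the shallow-vs-deep distinction behind the TODO above: copying the list gives the clone its own list object, but both lists still reference the same elements, so mutating an element through the copy is visible through the original.

import java.util.ArrayList;
import java.util.List;

public final class ShallowCopyDemo {

    static final class Holder { String value = "original"; }

    public static void main(String[] args) {
        List<Holder> inherited = new ArrayList<>(List.of(new Holder()));
        List<Holder> copy = new ArrayList<>(inherited);   // shallow: both lists share the Holder

        copy.get(0).value = "mutated through the copy";
        System.out.println(inherited.get(0).value);       // prints "mutated through the copy"
    }
}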
1,413 | 0 |
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
NONSATD
| true |
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
1,413 | 1 |
// just validate the windowId and viewId
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
NONSATD
| true |
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
1,413 | 2 |
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
DESIGN
| true |
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
@GetMapping("/{viewId}/{rowId}/field/{fieldName}/zoomInto")
public JSONZoomInto getRowFieldZoomInto(
@PathVariable("windowId") final String windowIdStr,
@PathVariable(PARAM_ViewId) final String viewIdStr,
@PathVariable("rowId") final String rowId,
@PathVariable("fieldName") final String fieldName)
{
// userSession.assertLoggedIn(); // NOTE: not needed because we are forwarding to windowRestController
ViewId.ofViewIdString(viewIdStr, WindowId.fromJson(windowIdStr)); // just validate the windowId and viewId
// TODO: atm we are forwarding all calls to windowRestController hoping the document existing and has the same ID as view's row ID.
return windowRestController.getDocumentFieldZoomInto(windowIdStr, rowId, fieldName);
}
|
34,186 | 0 |
// TODO consider implementing other model of listeners connection, without activities being bound
|
@Override
public void onCreate(@Nullable Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_map);
initViews();
Statistics.INSTANCE.trackConnectionState();
if (MwmApplication.get().nativeIsBenchmarking())
Utils.keepScreenOn(true, getWindow());
// TODO consider implementing other model of listeners connection, without activities being bound
Framework.nativeSetRoutingListener(this);
Framework.nativeSetRouteProgressListener(this);
Framework.nativeSetBalloonListener(this);
mSearchController = new FloatingSearchToolbarController(this);
mLocationPredictor = new LocationPredictor(new Handler(), this);
processIntent(getIntent());
SharingHelper.prepare();
}
|
DESIGN
| true |
if (MwmApplication.get().nativeIsBenchmarking())
Utils.keepScreenOn(true, getWindow());
// TODO consider implementing other model of listeners connection, without activities being bound
Framework.nativeSetRoutingListener(this);
Framework.nativeSetRouteProgressListener(this);
|
@Override
public void onCreate(@Nullable Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_map);
initViews();
Statistics.INSTANCE.trackConnectionState();
if (MwmApplication.get().nativeIsBenchmarking())
Utils.keepScreenOn(true, getWindow());
// TODO consider implementing other model of listeners connection, without activities being bound
Framework.nativeSetRoutingListener(this);
Framework.nativeSetRouteProgressListener(this);
Framework.nativeSetBalloonListener(this);
mSearchController = new FloatingSearchToolbarController(this);
mLocationPredictor = new LocationPredictor(new Handler(), this);
processIntent(getIntent());
SharingHelper.prepare();
}
|
@Override
public void onCreate(@Nullable Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_map);
initViews();
Statistics.INSTANCE.trackConnectionState();
if (MwmApplication.get().nativeIsBenchmarking())
Utils.keepScreenOn(true, getWindow());
// TODO consider implementing other model of listeners connection, without activities being bound
Framework.nativeSetRoutingListener(this);
Framework.nativeSetRouteProgressListener(this);
Framework.nativeSetBalloonListener(this);
mSearchController = new FloatingSearchToolbarController(this);
mLocationPredictor = new LocationPredictor(new Handler(), this);
processIntent(getIntent());
SharingHelper.prepare();
}
|
34,188 | 0 |
// TODO: This could actually be true with low probability
|
public void testZKSMFalse() throws ZKSetMembershipException, BigIntegerClassNotValid {
BigInteger[] theSet = {new BigInteger("0"), new BigInteger("1"),
new BigInteger("2"), new BigInteger("3"), new BigInteger("4")};
EncryptedInteger c = new EncryptedInteger(new BigInteger("10"), pub);
BigInteger r = c.set(new BigInteger("10"));
int msgIndex = 2;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
DEFECT
| true |
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
BigInteger r = c.set(new BigInteger("10"));
int msgIndex = 2;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
public void testZKSMFalse() throws ZKSetMembershipException, BigIntegerClassNotValid {
BigInteger[] theSet = {new BigInteger("0"), new BigInteger("1"),
new BigInteger("2"), new BigInteger("3"), new BigInteger("4")};
EncryptedInteger c = new EncryptedInteger(new BigInteger("10"), pub);
BigInteger r = c.set(new BigInteger("10"));
int msgIndex = 2;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
34,189 | 0 |
// TODO: This could actually be true with low probability
|
public void testZKSMSingleMemberSetFalse() throws ZKSetMembershipException, BigIntegerClassNotValid {
BigInteger[] theSet = {new BigInteger("0")};
EncryptedInteger c = new EncryptedInteger(BigInteger.ONE, pub);
BigInteger r = c.set(BigInteger.ONE);
int msgIndex = 0;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
DEFECT
| true |
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
BigInteger r = c.set(BigInteger.ONE);
int msgIndex = 0;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
public void testZKSMSingleMemberSetFalse() throws ZKSetMembershipException, BigIntegerClassNotValid {
BigInteger[] theSet = {new BigInteger("0")};
EncryptedInteger c = new EncryptedInteger(BigInteger.ONE, pub);
BigInteger r = c.set(BigInteger.ONE);
int msgIndex = 0;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
9,614 | 0 |
// TODO: get programs for a specific series && seasonID
|
void loadUi(EmpSeries series) {
this.episodesCarouselAdapter = new EpisodesCarouselAdapter(this, series);
RecyclerView episodesCarousel = (RecyclerView) findViewById(R.id.carousel_series_items);
episodesCarousel.setAdapter(this.episodesCarouselAdapter);
LinearLayoutManager layoutManager = new LinearLayoutManager(this, LinearLayoutManager.HORIZONTAL, false);
episodesCarousel.setLayoutManager(layoutManager);
if(series.episodes == null) {
// TODO: get programs for a specific series && seasonID
}
}
|
IMPLEMENTATION
| true |
episodesCarousel.setLayoutManager(layoutManager);
if(series.episodes == null) {
// TODO: get programs for a specific series && seasonID
}
}
|
void loadUi(EmpSeries series) {
this.episodesCarouselAdapter = new EpisodesCarouselAdapter(this, series);
RecyclerView episodesCarousel = (RecyclerView) findViewById(R.id.carousel_series_items);
episodesCarousel.setAdapter(this.episodesCarouselAdapter);
LinearLayoutManager layoutManager = new LinearLayoutManager(this, LinearLayoutManager.HORIZONTAL, false);
episodesCarousel.setLayoutManager(layoutManager);
if(series.episodes == null) {
// TODO: get programs for a specific series && seasonID
}
}
|
void loadUi(EmpSeries series) {
this.episodesCarouselAdapter = new EpisodesCarouselAdapter(this, series);
RecyclerView episodesCarousel = (RecyclerView) findViewById(R.id.carousel_series_items);
episodesCarousel.setAdapter(this.episodesCarouselAdapter);
LinearLayoutManager layoutManager = new LinearLayoutManager(this, LinearLayoutManager.HORIZONTAL, false);
episodesCarousel.setLayoutManager(layoutManager);
if(series.episodes == null) {
// TODO: get programs for a specific series && seasonID
}
}
|
34,190 | 0 |
// TODO: This could actually be true with low probability
|
public void testZKSMAddTrue() throws ZKSetMembershipException, PublicKeysNotEqualException, BigIntegerClassNotValid {
BigInteger[] theSet = {new BigInteger("0"), new BigInteger("1"),
new BigInteger("2"), new BigInteger("3"), new BigInteger("4"),
new BigInteger("6")};
EncryptedInteger c1 = new EncryptedInteger(new BigInteger("2"), pub);
BigInteger r1 = c1.set(new BigInteger("2"));
EncryptedInteger c2 = new EncryptedInteger(new BigInteger("3"), pub);
BigInteger r2 = c2.set(new BigInteger("3"));
EncryptedInteger c = c1.add(c2);
BigInteger r = r1.multiply(r2).mod(this.pub.getNSquared());
int msgIndex = 5;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
DEFECT
| true |
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
BigInteger r = r1.multiply(r2).mod(this.pub.getNSquared());
int msgIndex = 5;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
public void testZKSMAddTrue() throws ZKSetMembershipException, PublicKeysNotEqualException, BigIntegerClassNotValid {
BigInteger[] theSet = {new BigInteger("0"), new BigInteger("1"),
new BigInteger("2"), new BigInteger("3"), new BigInteger("4"),
new BigInteger("6")};
EncryptedInteger c1 = new EncryptedInteger(new BigInteger("2"), pub);
BigInteger r1 = c1.set(new BigInteger("2"));
EncryptedInteger c2 = new EncryptedInteger(new BigInteger("3"), pub);
BigInteger r2 = c2.set(new BigInteger("3"));
EncryptedInteger c = c1.add(c2);
BigInteger r = r1.multiply(r2).mod(this.pub.getNSquared());
int msgIndex = 5;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
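The test above relies on Paillier's additive homomorphism: multiplying two ciphertexts modulo n^2 yields an encryption of the sum, and the combined randomness is r1 * r2 mod n^2, which is exactly the r handed to the prover. A self-contained toy sketch (illustrative names, deliberately small key size, g = n + 1) of that identity:

import java.math.BigInteger;
import java.security.SecureRandom;

public final class PaillierAdditionDemo {

    public static void main(String[] args) {
        SecureRandom rnd = new SecureRandom();
        BigInteger p = BigInteger.probablePrime(64, rnd);
        BigInteger q = BigInteger.probablePrime(64, rnd);
        BigInteger n = p.multiply(q);
        BigInteger nSq = n.multiply(n);
        BigInteger g = n.add(BigInteger.ONE);                     // common choice g = n + 1
        BigInteger lambda = lcm(p.subtract(BigInteger.ONE), q.subtract(BigInteger.ONE));
        BigInteger mu = l(g.modPow(lambda, nSq), n).modInverse(n);

        BigInteger r1 = randomUnit(n, rnd);
        BigInteger r2 = randomUnit(n, rnd);
        BigInteger c1 = encrypt(BigInteger.valueOf(2), r1, g, n, nSq);
        BigInteger c2 = encrypt(BigInteger.valueOf(3), r2, g, n, nSq);

        BigInteger sum = c1.multiply(c2).mod(nSq);   // homomorphic addition of the plaintexts
        BigInteger r = r1.multiply(r2).mod(nSq);     // combined randomness, as in the test above
        System.out.println(decrypt(sum, lambda, mu, n, nSq));                    // prints 5
        System.out.println(sum.equals(encrypt(BigInteger.valueOf(5), r, g, n, nSq))); // prints true
    }

    static BigInteger encrypt(BigInteger m, BigInteger r, BigInteger g, BigInteger n, BigInteger nSq) {
        return g.modPow(m, nSq).multiply(r.modPow(n, nSq)).mod(nSq);   // g^m * r^n mod n^2
    }

    static BigInteger decrypt(BigInteger c, BigInteger lambda, BigInteger mu, BigInteger n, BigInteger nSq) {
        return l(c.modPow(lambda, nSq), n).multiply(mu).mod(n);
    }

    static BigInteger l(BigInteger x, BigInteger n) {
        return x.subtract(BigInteger.ONE).divide(n);
    }

    static BigInteger lcm(BigInteger a, BigInteger b) {
        return a.divide(a.gcd(b)).multiply(b);
    }

    static BigInteger randomUnit(BigInteger n, SecureRandom rnd) {
        BigInteger r;
        do {
            r = new BigInteger(n.bitLength(), rnd).mod(n);
        } while (r.signum() == 0 || !r.gcd(n).equals(BigInteger.ONE));
        return r;
    }
}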
34,191 | 0 |
// TODO: This could actually be true with low probability
|
public void testZKSMManyOperations() throws ZKSetMembershipException, PublicKeysNotEqualException, BigIntegerClassNotValid {
BigInteger[] theSet = {new BigInteger("0"), new BigInteger("1"),
new BigInteger("2"), new BigInteger("3"), new BigInteger("4"),
new BigInteger("6")};
EncryptedInteger c1 = new EncryptedInteger(new BigInteger("2"), pub);
BigInteger r1 = c1.set(new BigInteger("2"));
EncryptedInteger c2 = new EncryptedInteger(new BigInteger("3"), pub);
BigInteger r2 = c2.set(new BigInteger("3"));
EncryptedInteger c = c1.add(c2);
BigInteger r = r1.multiply(r2).mod(this.pub.getNSquared());
int msgIndex = 5;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
DEFECT
| true |
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
BigInteger r = r1.multiply(r2).mod(this.pub.getNSquared());
int msgIndex = 5;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
public void testZKSMManyOperations() throws ZKSetMembershipException, PublicKeysNotEqualException, BigIntegerClassNotValid {
BigInteger[] theSet = {new BigInteger("0"), new BigInteger("1"),
new BigInteger("2"), new BigInteger("3"), new BigInteger("4"),
new BigInteger("6")};
EncryptedInteger c1 = new EncryptedInteger(new BigInteger("2"), pub);
BigInteger r1 = c1.set(new BigInteger("2"));
EncryptedInteger c2 = new EncryptedInteger(new BigInteger("3"), pub);
BigInteger r2 = c2.set(new BigInteger("3"));
EncryptedInteger c = c1.add(c2);
BigInteger r = r1.multiply(r2).mod(this.pub.getNSquared());
int msgIndex = 5;
for (int i=0; i<10; i++) {
ZKSetMembershipProver prover = new ZKSetMembershipProver(pub, theSet, msgIndex, c);
BigInteger[] uVals = prover.genCommitments();
ZKSetMembershipVerifier verifier = new ZKSetMembershipVerifier(pub, c, uVals, theSet);
BigInteger e = verifier.genChallenge(new BigInteger("128"));
prover.computeResponse(e, r);
BigInteger[] eVals = prover.getEs();
BigInteger[] vVals = prover.getVs();
assertFalse(verifier.checkResponse(eVals, vVals)); // TODO: This could actually be true with low probability
}
}
|
26,006 | 0 |
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
|
public void bad() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).badSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
DEFECT
| true |
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
|
public void bad() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).badSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
public void bad() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).badSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
26,007 | 0 |
/* goodG2B() - use goodsource and badsink */
|
private void goodG2B() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).goodG2BSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
NONSATD
| true |
private void goodG2B() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).goodG2BSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
private void goodG2B() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).goodG2BSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
private void goodG2B() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).goodG2BSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
26,007 | 1 |
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
|
private void goodG2B() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).goodG2BSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
DEFECT
| true |
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
|
private void goodG2B() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).goodG2BSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
private void goodG2B() throws Throwable
{
String password = (new CWE319_Cleartext_Tx_Sensitive_Info__URLConnection_passwordAuth_61b()).goodG2BSource();
if (password != null)
{
/* POTENTIAL FLAW: Use password directly in PasswordAuthentication() */
PasswordAuthentication credentials = new PasswordAuthentication("user", password.toCharArray());
IO.writeLine(credentials.toString());
}
}
|
1,432 | 0 |
// Always call the superclass
|
@Override
public void onDestroy() {
super.onDestroy(); // Always call the superclass
// probably not needed, onStop closes the socket, which should make the thread stop (?)
if (recvAsyncTask != null)
recvAsyncTask.cancel(true);
recvAsyncTask = null;
}
|
NONSATD
| true |
@Override
public void onDestroy() {
super.onDestroy(); // Always call the superclass
// probably not needed, onStop closes the socket, which should make the thread stop (?)
if (recvAsyncTask != null)
|
@Override
public void onDestroy() {
super.onDestroy(); // Always call the superclass
// probably not needed, onStop closes the socket, which should make the thread stop (?)
if (recvAsyncTask != null)
recvAsyncTask.cancel(true);
recvAsyncTask = null;
}
|
@Override
public void onDestroy() {
super.onDestroy(); // Always call the superclass
// probably not needed, onStop closes the socket, which should make the thread stop (?)
if (recvAsyncTask != null)
recvAsyncTask.cancel(true);
recvAsyncTask = null;
}
|
1,432 | 1 |
// probably not needed, onStop closes the socket, which should make the thread stop (?)
|
@Override
public void onDestroy() {
super.onDestroy(); // Always call the superclass
// probably not needed, onStop closes the socket, which should make the thread stop (?)
if (recvAsyncTask != null)
recvAsyncTask.cancel(true);
recvAsyncTask = null;
}
|
DESIGN
| true |
public void onDestroy() {
super.onDestroy(); // Always call the superclass
// probably not needed, onStop closes the socket, which should make the thread stop (?)
if (recvAsyncTask != null)
recvAsyncTask.cancel(true);
|
@Override
public void onDestroy() {
super.onDestroy(); // Always call the superclass
// probably not needed, onStop closes the socket, which should make the thread stop (?)
if (recvAsyncTask != null)
recvAsyncTask.cancel(true);
recvAsyncTask = null;
}
|
@Override
public void onDestroy() {
super.onDestroy(); // Always call the superclass
// probably not needed, onStop closes the socket, which should make the thread stop (?)
if (recvAsyncTask != null)
recvAsyncTask.cancel(true);
recvAsyncTask = null;
}
|
1,435 | 0 |
// TODO: get client/loan/savings details
|
public static CashierTransaction fromJson(
final Cashier cashier,
final JsonCommand command) {
final Integer txnType = command.integerValueOfParameterNamed("txnType");
final BigDecimal txnAmount = command.bigDecimalValueOfParameterNamed("txnAmount");
final LocalDate txnDate = command.localDateValueOfParameterNamed("txnDate");
final String entityType = command.stringValueOfParameterNamed("entityType");
final String txnNote = command.stringValueOfParameterNamed("txnNote");
final Long entityId = command.longValueOfParameterNamed("entityId");
final String currencyCode = command.stringValueOfParameterNamed("currencyCode");
// TODO: get client/loan/savings details
return new CashierTransaction (cashier, txnType, txnAmount, txnDate,
entityType, entityId, txnNote, currencyCode);
}
|
IMPLEMENTATION
| true |
final Long entityId = command.longValueOfParameterNamed("entityId");
final String currencyCode = command.stringValueOfParameterNamed("currencyCode");
// TODO: get client/loan/savings details
return new CashierTransaction (cashier, txnType, txnAmount, txnDate,
entityType, entityId, txnNote, currencyCode);
|
public static CashierTransaction fromJson(
final Cashier cashier,
final JsonCommand command) {
final Integer txnType = command.integerValueOfParameterNamed("txnType");
final BigDecimal txnAmount = command.bigDecimalValueOfParameterNamed("txnAmount");
final LocalDate txnDate = command.localDateValueOfParameterNamed("txnDate");
final String entityType = command.stringValueOfParameterNamed("entityType");
final String txnNote = command.stringValueOfParameterNamed("txnNote");
final Long entityId = command.longValueOfParameterNamed("entityId");
final String currencyCode = command.stringValueOfParameterNamed("currencyCode");
// TODO: get client/loan/savings details
return new CashierTransaction (cashier, txnType, txnAmount, txnDate,
entityType, entityId, txnNote, currencyCode);
}
|
public static CashierTransaction fromJson(
final Cashier cashier,
final JsonCommand command) {
final Integer txnType = command.integerValueOfParameterNamed("txnType");
final BigDecimal txnAmount = command.bigDecimalValueOfParameterNamed("txnAmount");
final LocalDate txnDate = command.localDateValueOfParameterNamed("txnDate");
final String entityType = command.stringValueOfParameterNamed("entityType");
final String txnNote = command.stringValueOfParameterNamed("txnNote");
final Long entityId = command.longValueOfParameterNamed("entityId");
final String currencyCode = command.stringValueOfParameterNamed("currencyCode");
// TODO: get client/loan/savings details
return new CashierTransaction (cashier, txnType, txnAmount, txnDate,
entityType, entityId, txnNote, currencyCode);
}
|
1,452 | 0 |
/**
* Partition the limit by the full path. Percentages of the limit are partitioned to
* named groups. Group membership is derived from the provided mapping function.
* @param pathToGroup Mapping function from full path to a named group.
* @return Chainable builder
*/
|
public ServerWebExchangeLimiterBuilder partitionByPathInfo(
Function<String, String> pathToGroup) {
return partitionResolver(exchange -> {
// TODO: pathWithinApplication?
String path = exchange.getRequest().getPath().contextPath().value();
return Optional.ofNullable(path).map(pathToGroup).orElse(null);
});
}
|
NONSATD
| true |
public ServerWebExchangeLimiterBuilder partitionByPathInfo(
Function<String, String> pathToGroup) {
return partitionResolver(exchange -> {
// TODO: pathWithinApplication?
String path = exchange.getRequest().getPath().contextPath().value();
return Optional.ofNullable(path).map(pathToGroup).orElse(null);
});
}
|
public ServerWebExchangeLimiterBuilder partitionByPathInfo(
Function<String, String> pathToGroup) {
return partitionResolver(exchange -> {
// TODO: pathWithinApplication?
String path = exchange.getRequest().getPath().contextPath().value();
return Optional.ofNullable(path).map(pathToGroup).orElse(null);
});
}
|
public ServerWebExchangeLimiterBuilder partitionByPathInfo(
Function<String, String> pathToGroup) {
return partitionResolver(exchange -> {
// TODO: pathWithinApplication?
String path = exchange.getRequest().getPath().contextPath().value();
return Optional.ofNullable(path).map(pathToGroup).orElse(null);
});
}
|
1,452 | 1 |
// TODO: pathWithinApplication?
|
public ServerWebExchangeLimiterBuilder partitionByPathInfo(
Function<String, String> pathToGroup) {
return partitionResolver(exchange -> {
// TODO: pathWithinApplication?
String path = exchange.getRequest().getPath().contextPath().value();
return Optional.ofNullable(path).map(pathToGroup).orElse(null);
});
}
|
DESIGN
| true |
Function<String, String> pathToGroup) {
return partitionResolver(exchange -> {
// TODO: pathWithinApplication?
String path = exchange.getRequest().getPath().contextPath().value();
return Optional.ofNullable(path).map(pathToGroup).orElse(null);
|
public ServerWebExchangeLimiterBuilder partitionByPathInfo(
Function<String, String> pathToGroup) {
return partitionResolver(exchange -> {
// TODO: pathWithinApplication?
String path = exchange.getRequest().getPath().contextPath().value();
return Optional.ofNullable(path).map(pathToGroup).orElse(null);
});
}
|
public ServerWebExchangeLimiterBuilder partitionByPathInfo(
Function<String, String> pathToGroup) {
return partitionResolver(exchange -> {
// TODO: pathWithinApplication?
String path = exchange.getRequest().getPath().contextPath().value();
return Optional.ofNullable(path).map(pathToGroup).orElse(null);
});
}
|
9,644 | 0 |
//TODO: add render code
|
@Override
public void simpleRender(RenderManager rm) {
//TODO: add render code
}
|
IMPLEMENTATION
| true |
@Override
public void simpleRender(RenderManager rm) {
//TODO: add render code
}
|
@Override
public void simpleRender(RenderManager rm) {
//TODO: add render code
}
|
@Override
public void simpleRender(RenderManager rm) {
//TODO: add render code
}
|
34,226 | 0 |
//TODO: Replace this with your own logic
|
private boolean isEmailValid(String email) {
//TODO: Replace this with your own logic
return email.contains("@");
}
|
IMPLEMENTATION
| true |
private boolean isEmailValid(String email) {
//TODO: Replace this with your own logic
return email.contains("@");
}
|
private boolean isEmailValid(String email) {
//TODO: Replace this with your own logic
return email.contains("@");
}
|
private boolean isEmailValid(String email) {
//TODO: Replace this with your own logic
return email.contains("@");
}
|
1,459 | 0 |
// After 'source_col_c = 1' is pushed into source table scan, it's possible for 'source_col_c' table scan assignment to be pruned
// Redirection results in Project('dest_col_b') -> Filter('dest_col_c = 1') -> TableScan for such case
// but dest_col_a has mismatched type compared to source domain
|
@Test
public void testPredicateTypeMismatch()
{
try (LocalQueryRunner queryRunner = createLocalQueryRunner(
getMockApplyRedirectAfterPredicatePushdown(TYPE_MISMATCHED_REDIRECTION_MAPPING_BC, Optional.of(ImmutableSet.of(SOURCE_COLUMN_HANDLE_B))),
Optional.of(this::mockApplyProjection),
Optional.of(getMockApplyFilter(ImmutableSet.of(SOURCE_COLUMN_HANDLE_C))))) {
// After 'source_col_c = 1' is pushed into source table scan, it's possible for 'source_col_c' table scan assignment to be pruned
// Redirection results in Project('dest_col_b') -> Filter('dest_col_c = 1') -> TableScan for such case
// but dest_col_a has mismatched type compared to source domain
transaction(queryRunner.getTransactionManager(), queryRunner.getAccessControl())
.execute(MOCK_SESSION, session -> {
assertThatThrownBy(() -> queryRunner.createPlan(session, "SELECT source_col_b FROM test_table WHERE source_col_c = 'foo'", WarningCollector.NOOP))
.isInstanceOf(TrinoException.class)
// TODO report source column name instead of ColumnHandle toString
.hasMessageMatching("Redirected column mock_catalog.target_schema.target_table.destination_col_a has type integer, " +
"different from source column mock_catalog.test_schema.test_table.MockConnectorColumnHandle.*source_col_c.* type: varchar");
});
}
}
|
NONSATD
| true |
Optional.of(this::mockApplyProjection),
Optional.of(getMockApplyFilter(ImmutableSet.of(SOURCE_COLUMN_HANDLE_C))))) {
// After 'source_col_c = 1' is pushed into source table scan, it's possible for 'source_col_c' table scan assignment to be pruned
// Redirection results in Project('dest_col_b') -> Filter('dest_col_c = 1') -> TableScan for such case
// but dest_col_a has mismatched type compared to source domain
transaction(queryRunner.getTransactionManager(), queryRunner.getAccessControl())
.execute(MOCK_SESSION, session -> {
|
@Test
public void testPredicateTypeMismatch()
{
try (LocalQueryRunner queryRunner = createLocalQueryRunner(
getMockApplyRedirectAfterPredicatePushdown(TYPE_MISMATCHED_REDIRECTION_MAPPING_BC, Optional.of(ImmutableSet.of(SOURCE_COLUMN_HANDLE_B))),
Optional.of(this::mockApplyProjection),
Optional.of(getMockApplyFilter(ImmutableSet.of(SOURCE_COLUMN_HANDLE_C))))) {
// After 'source_col_c = 1' is pushed into source table scan, it's possible for 'source_col_c' table scan assignment to be pruned
// Redirection results in Project('dest_col_b') -> Filter('dest_col_c = 1') -> TableScan for such case
// but dest_col_a has mismatched type compared to source domain
transaction(queryRunner.getTransactionManager(), queryRunner.getAccessControl())
.execute(MOCK_SESSION, session -> {
assertThatThrownBy(() -> queryRunner.createPlan(session, "SELECT source_col_b FROM test_table WHERE source_col_c = 'foo'", WarningCollector.NOOP))
.isInstanceOf(TrinoException.class)
// TODO report source column name instead of ColumnHandle toString
.hasMessageMatching("Redirected column mock_catalog.target_schema.target_table.destination_col_a has type integer, " +
"different from source column mock_catalog.test_schema.test_table.MockConnectorColumnHandle.*source_col_c.* type: varchar");
});
}
}
|
@Test
public void testPredicateTypeMismatch()
{
try (LocalQueryRunner queryRunner = createLocalQueryRunner(
getMockApplyRedirectAfterPredicatePushdown(TYPE_MISMATCHED_REDIRECTION_MAPPING_BC, Optional.of(ImmutableSet.of(SOURCE_COLUMN_HANDLE_B))),
Optional.of(this::mockApplyProjection),
Optional.of(getMockApplyFilter(ImmutableSet.of(SOURCE_COLUMN_HANDLE_C))))) {
// After 'source_col_c = 1' is pushed into source table scan, it's possible for 'source_col_c' table scan assignment to be pruned
// Redirection results in Project('dest_col_b') -> Filter('dest_col_c = 1') -> TableScan for such case
// but dest_col_a has mismatched type compared to source domain
transaction(queryRunner.getTransactionManager(), queryRunner.getAccessControl())
.execute(MOCK_SESSION, session -> {
assertThatThrownBy(() -> queryRunner.createPlan(session, "SELECT source_col_b FROM test_table WHERE source_col_c = 'foo'", WarningCollector.NOOP))
.isInstanceOf(TrinoException.class)
// TODO report source column name instead of ColumnHandle toString
.hasMessageMatching("Redirected column mock_catalog.target_schema.target_table.destination_col_a has type integer, " +
"different from source column mock_catalog.test_schema.test_table.MockConnectorColumnHandle.*source_col_c.* type: varchar");
});
}
}
|
1,459 | 1 |
// TODO report source column name instead of ColumnHandle toString
|
@Test
public void testPredicateTypeMismatch()
{
try (LocalQueryRunner queryRunner = createLocalQueryRunner(
getMockApplyRedirectAfterPredicatePushdown(TYPE_MISMATCHED_REDIRECTION_MAPPING_BC, Optional.of(ImmutableSet.of(SOURCE_COLUMN_HANDLE_B))),
Optional.of(this::mockApplyProjection),
Optional.of(getMockApplyFilter(ImmutableSet.of(SOURCE_COLUMN_HANDLE_C))))) {
// After 'source_col_c = 1' is pushed into source table scan, it's possible for 'source_col_c' table scan assignment to be pruned
// Redirection results in Project('dest_col_b') -> Filter('dest_col_c = 1') -> TableScan for such case
// but dest_col_a has mismatched type compared to source domain
transaction(queryRunner.getTransactionManager(), queryRunner.getAccessControl())
.execute(MOCK_SESSION, session -> {
assertThatThrownBy(() -> queryRunner.createPlan(session, "SELECT source_col_b FROM test_table WHERE source_col_c = 'foo'", WarningCollector.NOOP))
.isInstanceOf(TrinoException.class)
// TODO report source column name instead of ColumnHandle toString
.hasMessageMatching("Redirected column mock_catalog.target_schema.target_table.destination_col_a has type integer, " +
"different from source column mock_catalog.test_schema.test_table.MockConnectorColumnHandle.*source_col_c.* type: varchar");
});
}
}
|
DESIGN
| true |
assertThatThrownBy(() -> queryRunner.createPlan(session, "SELECT source_col_b FROM test_table WHERE source_col_c = 'foo'", WarningCollector.NOOP))
.isInstanceOf(TrinoException.class)
// TODO report source column name instead of ColumnHandle toString
.hasMessageMatching("Redirected column mock_catalog.target_schema.target_table.destination_col_a has type integer, " +
"different from source column mock_catalog.test_schema.test_table.MockConnectorColumnHandle.*source_col_c.* type: varchar");
|
getMockApplyRedirectAfterPredicatePushdown(TYPE_MISMATCHED_REDIRECTION_MAPPING_BC, Optional.of(ImmutableSet.of(SOURCE_COLUMN_HANDLE_B))),
Optional.of(this::mockApplyProjection),
Optional.of(getMockApplyFilter(ImmutableSet.of(SOURCE_COLUMN_HANDLE_C))))) {
// After 'source_col_c = 1' is pushed into source table scan, it's possible for 'source_col_c' table scan assignment to be pruned
// Redirection results in Project('dest_col_b') -> Filter('dest_col_c = 1') -> TableScan for such case
// but dest_col_a has mismatched type compared to source domain
transaction(queryRunner.getTransactionManager(), queryRunner.getAccessControl())
.execute(MOCK_SESSION, session -> {
assertThatThrownBy(() -> queryRunner.createPlan(session, "SELECT source_col_b FROM test_table WHERE source_col_c = 'foo'", WarningCollector.NOOP))
.isInstanceOf(TrinoException.class)
// TODO report source column name instead of ColumnHandle toString
.hasMessageMatching("Redirected column mock_catalog.target_schema.target_table.destination_col_a has type integer, " +
"different from source column mock_catalog.test_schema.test_table.MockConnectorColumnHandle.*source_col_c.* type: varchar");
});
}
}
|
@Test
public void testPredicateTypeMismatch()
{
try (LocalQueryRunner queryRunner = createLocalQueryRunner(
getMockApplyRedirectAfterPredicatePushdown(TYPE_MISMATCHED_REDIRECTION_MAPPING_BC, Optional.of(ImmutableSet.of(SOURCE_COLUMN_HANDLE_B))),
Optional.of(this::mockApplyProjection),
Optional.of(getMockApplyFilter(ImmutableSet.of(SOURCE_COLUMN_HANDLE_C))))) {
// After 'source_col_c = 1' is pushed into source table scan, it's possible for 'source_col_c' table scan assignment to be pruned
// Redirection results in Project('dest_col_b') -> Filter('dest_col_c = 1') -> TableScan for such case
// but dest_col_a has mismatched type compared to source domain
transaction(queryRunner.getTransactionManager(), queryRunner.getAccessControl())
.execute(MOCK_SESSION, session -> {
assertThatThrownBy(() -> queryRunner.createPlan(session, "SELECT source_col_b FROM test_table WHERE source_col_c = 'foo'", WarningCollector.NOOP))
.isInstanceOf(TrinoException.class)
// TODO report source column name instead of ColumnHandle toString
.hasMessageMatching("Redirected column mock_catalog.target_schema.target_table.destination_col_a has type integer, " +
"different from source column mock_catalog.test_schema.test_table.MockConnectorColumnHandle.*source_col_c.* type: varchar");
});
}
}
|
34,227 | 0 |
//TODO: Replace this with your own logic
|
private boolean isPasswordValid(String password) {
//TODO: Replace this with your own logic
return password.length() > 4;
}
|
IMPLEMENTATION
| true |
private boolean isPasswordValid(String password) {
//TODO: Replace this with your own logic
return password.length() > 4;
}
|
private boolean isPasswordValid(String password) {
//TODO: Replace this with your own logic
return password.length() > 4;
}
|
private boolean isPasswordValid(String password) {
//TODO: Replace this with your own logic
return password.length() > 4;
}
|
34,231 | 0 |
// add tracing for this operation
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
// This is not ideal, but there's not good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
long numFailedMutations = 0;;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
child.stop();
child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
|
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
|
34,231 | 1 |
// at this point we are going through mutations for each table
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
// This is not ideal, but there's not good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
long numFailedMutations = 0;;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
child.stop();
child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
|
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
|
34,231 | 2 |
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
// This is not ideal, but there's not good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
long numFailedMutations = 0;;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
child.stop();
child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
|
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
|
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
|
34,231 | 3 |
// build map from physical table to mutation list
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                        child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
                                // and one of the region servers doesn't have it. Retrying will cause it to have the metadata the next time around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
|
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
|
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
|
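The record above covers the step that folds the per-table mutation batches into one map. Below is a minimal, self-contained sketch of that grouping idea; the class name, the SimpleEntry pairs, and the String stand-ins for Phoenix's TableInfo and Mutation types are assumptions made for illustration, not Phoenix APIs.

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative stand-in for the "build map from physical table to mutation list" step:
// each (physical table, batch) pair produced while walking a data table and its indexes
// is folded into a LinkedHashMap, and any batch already buffered for that table is
// prepended so earlier mutations keep their relative order.
public class MutationGrouping {

    static Map<String, List<String>> groupByPhysicalTable(
            List<SimpleEntry<String, List<String>>> perTablePairs) {
        Map<String, List<String>> grouped = new LinkedHashMap<>();
        for (SimpleEntry<String, List<String>> pair : perTablePairs) {
            List<String> batch = new ArrayList<>(pair.getValue());
            List<String> previous = grouped.put(pair.getKey(), batch);
            if (previous != null) {
                batch.addAll(0, previous); // older mutations stay ahead of newer ones
            }
        }
        return grouped;
    }

    public static void main(String[] args) {
        List<SimpleEntry<String, List<String>>> pairs = new ArrayList<>();
        pairs.add(new SimpleEntry<>("T", Arrays.asList("put-1")));
        pairs.add(new SimpleEntry<>("T_IDX", Arrays.asList("idx-put-1")));
        pairs.add(new SimpleEntry<>("T", Arrays.asList("put-2")));
        // Prints {T=[put-1, put-2], T_IDX=[idx-put-1]}
        System.out.println(groupByPhysicalTable(pairs));
    }
}

The prepend mirrors the listing's addAll(0, oldMutationList), which keeps a previously buffered batch ahead of the one just produced for the same physical table.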
34,231 | 4 |
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                        child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
                                // and one of the region servers doesn't have it. Retrying will cause it to have the metadata the next time around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
|
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
|
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
|
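For the record above, a rough sketch of the statement-index bookkeeping it describes: the indexes of statements whose rows have been staged but not committed are collected so a failure can report every statement in the transaction. The tracker class, its method names, and the int[] representation are hypothetical; Phoenix keeps this state inside its RowMutationState objects.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

// Hypothetical tracker: collects the statement indexes behind every row that has been
// sent for a transactional table, so an eventual CommitException-style error can list
// all statements that were part of the uncommitted transaction.
public class UncommittedStatementTracker {

    private final List<Integer> uncommitted = new ArrayList<>();

    public void addUncommittedStatementIndexes(Collection<int[]> perRowStatementIndexes) {
        for (int[] indexes : perRowStatementIndexes) {
            for (int index : indexes) {
                uncommitted.add(index);
            }
        }
    }

    public int[] getUncommittedStatementIndexes() {
        int[] result = new int[uncommitted.size()];
        for (int i = 0; i < result.length; i++) {
            result[i] = uncommitted.get(i);
        }
        return result;
    }
}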
34,231 | 5 |
// Keep all mutations we've encountered until a commit or rollback.
 // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                        child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
                                // and one of the region servers doesn't have it. Retrying will cause it to have the metadata the next time around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
|
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
|
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
|
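The comment classified in the record above is about holding on to already-sent transactional mutations until the transaction ends. A small sketch of that buffering follows, with Strings standing in for Phoenix's TableRef and row-state types; the class and method names are invented for illustration, and the real joinMutationState does considerably more merging.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative buffer: mutations that were sent but not yet committed are retained,
// keyed by table, so the commit can be replayed (or the state discarded on rollback).
public class TxMutationBuffer {

    private final Map<String, List<String>> txMutations = new HashMap<>();

    // Merge the batch just sent for a table into whatever is already buffered for it.
    public void join(String tableRef, List<String> sentMutations) {
        txMutations.computeIfAbsent(tableRef, key -> new ArrayList<>())
                   .addAll(sentMutations);
    }

    // On commit or rollback the buffer is handed back and cleared; it is only needed
    // while the transaction is still open.
    public Map<String, List<String>> drain() {
        Map<String, List<String>> snapshot = new HashMap<>(txMutations);
        txMutations.clear();
        return snapshot;
    }
}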
34,231 | 6 |
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                        child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
                                // and one of the region servers doesn't have it. Retrying will cause it to have the metadata the next time around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
DESIGN
| true |
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
|
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
|
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
|
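The record above is the one labeled DESIGN: its TODO asks whether the byte[]-to-String conversion for the span name could be smarter. One possible answer, sketched below, is to convert each table name once and reuse it; this is only an illustration of the idea, not the fix Phoenix adopted, and the class name is invented.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical cache: converts an HBase table name to a String once and reuses it for
// every span label and log message, instead of re-running the conversion per attempt.
// ByteBuffer.wrap supplies value-based equals/hashCode for the byte[] key; callers must
// not mutate the array after handing it in.
public class TableNameCache {

    private final Map<ByteBuffer, String> names = new ConcurrentHashMap<>();

    public String nameFor(byte[] htableName) {
        return names.computeIfAbsent(ByteBuffer.wrap(htableName),
                key -> new String(htableName, StandardCharsets.UTF_8));
    }
}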
34,231 | 7 |
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                        child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
                                // and one of the region servers doesn't have it. Retrying will cause it to have the metadata the next time around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
|
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
|
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
|
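The record above covers the single-retry comment: only a missing-index-metadata style failure on the first attempt warrants clearing the region cache and trying again. A compact sketch of that control flow, assuming nothing Phoenix-specific; the Callable/Runnable hooks and the error-code parameter are placeholders for the real batch call, cache clear, and SQLExceptionCode check.

import java.sql.SQLException;
import java.util.concurrent.Callable;

// Generic single-retry helper: run the batch once; if the first attempt fails with the
// designated retriable error code, clear the cache and try exactly one more time; any
// other failure, or a failure on the retry, is rethrown to the caller.
public class SingleRetry {

    static <T> T runWithOneRetry(Callable<T> attempt, Runnable clearCache,
                                 int retriableErrorCode) throws Exception {
        for (int retryCount = 0; ; retryCount++) {
            try {
                return attempt.call();
            } catch (SQLException e) {
                if (retryCount == 0 && e.getErrorCode() == retriableErrorCode) {
                    clearCache.run(); // e.g. drop the cached region locations for the table
                    continue;
                }
                throw e;
            }
        }
    }
}

A caller would wrap the hTable.batch loop in attempt and pass the cache-clearing call as clearCache; that keeps the "retry at most once, and only for this error" policy in one place.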
34,231 | 8 |
// Track tables to which we've sent uncommitted data
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                        long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
|
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
|
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                        long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
|
34,231 | 9 |
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                        long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
|
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
|
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                        long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
|
34,231 | 10 |
// Remove batches as we process them
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                        long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
|
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
|
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
|
34,231 | 11 |
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                        long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
|
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
|
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
|
34,231 | 12 |
// add a new child span as this one failed
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                        long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
|
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
|
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
|
34,231 | 13 |
// Throw to client an exception that indicates the statements that
// were not committed successfully.
|
@SuppressWarnings("deprecation")
private void send(Iterator<TableRef> tableRefIterator) throws SQLException {
int i = 0;
long[] serverTimeStamps = null;
boolean sendAll = false;
if (tableRefIterator == null) {
serverTimeStamps = validateAll();
tableRefIterator = mutations.keySet().iterator();
sendAll = true;
}
Map<ImmutableBytesPtr, RowMutationState> valuesMap;
Map<TableInfo,List<Mutation>> physicalTableMutationMap = Maps.newLinkedHashMap();
// add tracing for this operation
try (TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables")) {
Span span = trace.getSpan();
ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable();
while (tableRefIterator.hasNext()) {
// at this point we are going through mutations for each table
final TableRef tableRef = tableRefIterator.next();
valuesMap = mutations.get(tableRef);
if (valuesMap == null || valuesMap.isEmpty()) {
continue;
}
// Validate as we go if transactional since we can undo if a problem occurs (which is unlikely)
long serverTimestamp = serverTimeStamps == null ? validateAndGetServerTimestamp(tableRef, valuesMap) : serverTimeStamps[i++];
Long scn = connection.getSCN();
long mutationTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
final PTable table = tableRef.getTable();
Iterator<Pair<PName,List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, mutationTimestamp, serverTimestamp, false, sendAll);
// build map from physical table to mutation list
boolean isDataTable = true;
while (mutationsIterator.hasNext()) {
Pair<PName,List<Mutation>> pair = mutationsIterator.next();
PName hTableName = pair.getFirst();
List<Mutation> mutationList = pair.getSecond();
TableInfo tableInfo = new TableInfo(isDataTable, hTableName, tableRef);
List<Mutation> oldMutationList = physicalTableMutationMap.put(tableInfo, mutationList);
if (oldMutationList!=null)
mutationList.addAll(0, oldMutationList);
isDataTable = false;
}
// For transactions, track the statement indexes as we send data
// over because our CommitException should include all statements
// involved in the transaction since none of them would have been
// committed in the event of a failure.
if (table.isTransactional()) {
addUncommittedStatementIndexes(valuesMap.values());
if (txMutations.isEmpty()) {
txMutations = Maps.newHashMapWithExpectedSize(mutations.size());
}
// Keep all mutations we've encountered until a commit or rollback.
                    // This is not ideal, but there's no good way to get the values back
// in the event that we need to replay the commit.
// Copy TableRef so we have the original PTable and know when the
// indexes have changed.
joinMutationState(new TableRef(tableRef), valuesMap, txMutations);
}
}
long serverTimestamp = HConstants.LATEST_TIMESTAMP;
Iterator<Entry<TableInfo, List<Mutation>>> mutationsIterator = physicalTableMutationMap.entrySet().iterator();
while (mutationsIterator.hasNext()) {
Entry<TableInfo, List<Mutation>> pair = mutationsIterator.next();
TableInfo tableInfo = pair.getKey();
byte[] htableName = tableInfo.getHTableName().getBytes();
List<Mutation> mutationList = pair.getValue();
//create a span per target table
//TODO maybe we can be smarter about the table name to string here?
Span child = Tracing.child(span,"Writing mutation batch for table: "+Bytes.toString(htableName));
int retryCount = 0;
boolean shouldRetry = false;
long numMutations = 0;
long mutationSizeBytes = 0;
long mutationCommitTime = 0;
                        long numFailedMutations = 0;
long startTime = 0;
do {
TableRef origTableRef = tableInfo.getOrigTableRef();
PTable table = origTableRef.getTable();
table.getIndexMaintainers(indexMetaDataPtr, connection);
final ServerCache cache = tableInfo.isDataTable() ? setMetaDataOnMutations(origTableRef, mutationList, indexMetaDataPtr) : null;
// If we haven't retried yet, retry for this case only, as it's possible that
// a split will occur after we send the index metadata cache to all known
// region servers.
shouldRetry = cache!=null;
SQLException sqlE = null;
HTableInterface hTable = connection.getQueryServices().getTable(htableName);
try {
if (table.isTransactional()) {
// Track tables to which we've sent uncommitted data
uncommittedPhysicalNames.add(table.getPhysicalName().getString());
// If we have indexes, wrap the HTable in a delegate HTable that
// will attach the necessary index meta data in the event of a
// rollback
if (!table.getIndexes().isEmpty()) {
hTable = new MetaDataAwareHTable(hTable, origTableRef);
}
hTable = TransactionUtil.getPhoenixTransactionTable(phoenixTransactionContext, hTable, table);
}
numMutations = mutationList.size();
GLOBAL_MUTATION_BATCH_SIZE.update(numMutations);
mutationSizeBytes = calculateMutationSize(mutationList);
startTime = System.currentTimeMillis();
child.addTimelineAnnotation("Attempt " + retryCount);
List<List<Mutation>> mutationBatchList = getMutationBatchList(batchSize, batchSizeBytes, mutationList);
for (List<Mutation> mutationBatch : mutationBatchList) {
hTable.batch(mutationBatch);
batchCount++;
}
if (logger.isDebugEnabled()) logger.debug("Sent batch of " + numMutations + " for " + Bytes.toString(htableName));
                            child.stop();
shouldRetry = false;
mutationCommitTime = System.currentTimeMillis() - startTime;
GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
numFailedMutations = 0;
if (tableInfo.isDataTable()) {
numRows -= numMutations;
}
// Remove batches as we process them
mutations.remove(origTableRef);
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
}
if (sqlE != null) {
throw sqlE;
}
}
}
} while (shouldRetry && retryCount++ < 1);
}
}
}
|
NONSATD
| true |
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
|
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
|
} catch (Exception e) {
mutationCommitTime = System.currentTimeMillis() - startTime;
serverTimestamp = ServerUtil.parseServerTimestamp(e);
SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
if (inferredE != null) {
if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
// Swallow this exception once, as it's possible that we split after sending the index metadata
// and one of the region servers doesn't have it. This will cause it to have it the next go around.
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
logger.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
child.addTimelineAnnotation(msg);
child.stop();
child = Tracing.child(span,"Failed batch, attempting retry");
continue;
}
e = inferredE;
}
// Throw to client an exception that indicates the statements that
// were not committed successfully.
int[] uncommittedStatementIndexes = getUncommittedStatementIndexes();
sqlE = new CommitException(e, uncommittedStatementIndexes, serverTimestamp);
numFailedMutations = uncommittedStatementIndexes.length;
GLOBAL_MUTATION_BATCH_FAILED_COUNT.update(numFailedMutations);
} finally {
MutationMetric mutationsMetric = new MutationMetric(numMutations, mutationSizeBytes, mutationCommitTime, numFailedMutations);
mutationMetricQueue.addMetricsForTable(Bytes.toString(htableName), mutationsMetric);
try {
if (cache!=null)
cache.close();
} finally {
try {
hTable.close();
}
catch (IOException e) {
if (sqlE != null) {
sqlE.setNextException(ServerUtil.parseServerException(e));
} else {
sqlE = ServerUtil.parseServerException(e);
}
|
1,463 | 0 |
// TODO should we set KEYWORDS too?
// All tested photo managers set the same in Iptc.Application2.Keywords and Xmp.dc.subject
|
public void parse(InputStream file) throws IOException, TikaException {
ByteArrayOutputStream xmpraw = new ByteArrayOutputStream();
if (!scanner.parse(file, xmpraw)) {
return;
}
Reader decoded = new InputStreamReader(
new ByteArrayInputStream(xmpraw.toByteArray()),
DEFAULT_XMP_CHARSET);
try {
XMPMetadata xmp = XMPMetadata.load(new InputSource(decoded));
XMPSchemaDublinCore dc = xmp.getDublinCoreSchema();
if (dc != null) {
if (dc.getTitle() != null) {
metadata.set(DublinCore.TITLE, dc.getTitle());
}
if (dc.getDescription() != null) {
metadata.set(DublinCore.DESCRIPTION, dc.getDescription());
}
if (dc.getCreators() != null && dc.getCreators().size() > 0) {
metadata.set(DublinCore.CREATOR, joinCreators(dc.getCreators()));
}
if (dc.getSubjects() != null && dc.getSubjects().size() > 0) {
Iterator<String> keywords = dc.getSubjects().iterator();
while (keywords.hasNext()) {
metadata.add(DublinCore.SUBJECT, keywords.next());
}
// TODO should we set KEYWORDS too?
// All tested photo managers set the same in Iptc.Application2.Keywords and Xmp.dc.subject
}
}
} catch (IOException e) {
// Could not parse embedded XMP metadata. That's not a serious
// problem, so we'll just ignore the issue for now.
// TODO: Make error handling like this configurable.
}
}
|
DESIGN
| true |
metadata.add(DublinCore.SUBJECT, keywords.next());
}
// TODO should we set KEYWORDS too?
// All tested photo managers set the same in Iptc.Application2.Keywords and Xmp.dc.subject
}
}
|
metadata.set(DublinCore.DESCRIPTION, dc.getDescription());
}
if (dc.getCreators() != null && dc.getCreators().size() > 0) {
metadata.set(DublinCore.CREATOR, joinCreators(dc.getCreators()));
}
if (dc.getSubjects() != null && dc.getSubjects().size() > 0) {
Iterator<String> keywords = dc.getSubjects().iterator();
while (keywords.hasNext()) {
metadata.add(DublinCore.SUBJECT, keywords.next());
}
// TODO should we set KEYWORDS too?
// All tested photo managers set the same in Iptc.Application2.Keywords and Xmp.dc.subject
}
}
} catch (IOException e) {
// Could not parse embedded XMP metadata. That's not a serious
// problem, so we'll just ignore the issue for now.
// TODO: Make error handling like this configurable.
}
}
|
new ByteArrayInputStream(xmpraw.toByteArray()),
DEFAULT_XMP_CHARSET);
try {
XMPMetadata xmp = XMPMetadata.load(new InputSource(decoded));
XMPSchemaDublinCore dc = xmp.getDublinCoreSchema();
if (dc != null) {
if (dc.getTitle() != null) {
metadata.set(DublinCore.TITLE, dc.getTitle());
}
if (dc.getDescription() != null) {
metadata.set(DublinCore.DESCRIPTION, dc.getDescription());
}
if (dc.getCreators() != null && dc.getCreators().size() > 0) {
metadata.set(DublinCore.CREATOR, joinCreators(dc.getCreators()));
}
if (dc.getSubjects() != null && dc.getSubjects().size() > 0) {
Iterator<String> keywords = dc.getSubjects().iterator();
while (keywords.hasNext()) {
metadata.add(DublinCore.SUBJECT, keywords.next());
}
// TODO should we set KEYWORDS too?
// All tested photo managers set the same in Iptc.Application2.Keywords and Xmp.dc.subject
}
}
} catch (IOException e) {
// Could not parse embedded XMP metadata. That's not a serious
// problem, so we'll just ignore the issue for now.
// TODO: Make error handling like this configurable.
}
}
|
1,463 | 1 |
// Could not parse embedded XMP metadata. That's not a serious
// problem, so we'll just ignore the issue for now.
// TODO: Make error handling like this configurable.
|
public void parse(InputStream file) throws IOException, TikaException {
ByteArrayOutputStream xmpraw = new ByteArrayOutputStream();
if (!scanner.parse(file, xmpraw)) {
return;
}
Reader decoded = new InputStreamReader(
new ByteArrayInputStream(xmpraw.toByteArray()),
DEFAULT_XMP_CHARSET);
try {
XMPMetadata xmp = XMPMetadata.load(new InputSource(decoded));
XMPSchemaDublinCore dc = xmp.getDublinCoreSchema();
if (dc != null) {
if (dc.getTitle() != null) {
metadata.set(DublinCore.TITLE, dc.getTitle());
}
if (dc.getDescription() != null) {
metadata.set(DublinCore.DESCRIPTION, dc.getDescription());
}
if (dc.getCreators() != null && dc.getCreators().size() > 0) {
metadata.set(DublinCore.CREATOR, joinCreators(dc.getCreators()));
}
if (dc.getSubjects() != null && dc.getSubjects().size() > 0) {
Iterator<String> keywords = dc.getSubjects().iterator();
while (keywords.hasNext()) {
metadata.add(DublinCore.SUBJECT, keywords.next());
}
// TODO should we set KEYWORDS too?
// All tested photo managers set the same in Iptc.Application2.Keywords and Xmp.dc.subject
}
}
} catch (IOException e) {
// Could not parse embedded XMP metadata. That's not a serious
// problem, so we'll just ignore the issue for now.
// TODO: Make error handling like this configurable.
}
}
|
IMPLEMENTATION
| true |
}
} catch (IOException e) {
// Could not parse embedded XMP metadata. That's not a serious
// problem, so we'll just ignore the issue for now.
// TODO: Make error handling like this configurable.
}
}
|
if (dc.getSubjects() != null && dc.getSubjects().size() > 0) {
Iterator<String> keywords = dc.getSubjects().iterator();
while (keywords.hasNext()) {
metadata.add(DublinCore.SUBJECT, keywords.next());
}
// TODO should we set KEYWORDS too?
// All tested photo managers set the same in Iptc.Application2.Keywords and Xmp.dc.subject
}
}
} catch (IOException e) {
// Could not parse embedded XMP metadata. That's not a serious
// problem, so we'll just ignore the issue for now.
// TODO: Make error handling like this configurable.
}
}
|
if (dc != null) {
if (dc.getTitle() != null) {
metadata.set(DublinCore.TITLE, dc.getTitle());
}
if (dc.getDescription() != null) {
metadata.set(DublinCore.DESCRIPTION, dc.getDescription());
}
if (dc.getCreators() != null && dc.getCreators().size() > 0) {
metadata.set(DublinCore.CREATOR, joinCreators(dc.getCreators()));
}
if (dc.getSubjects() != null && dc.getSubjects().size() > 0) {
Iterator<String> keywords = dc.getSubjects().iterator();
while (keywords.hasNext()) {
metadata.add(DublinCore.SUBJECT, keywords.next());
}
// TODO should we set KEYWORDS too?
// All tested photo managers set the same in Iptc.Application2.Keywords and Xmp.dc.subject
}
}
} catch (IOException e) {
// Could not parse embedded XMP metadata. That's not a serious
// problem, so we'll just ignore the issue for now.
// TODO: Make error handling like this configurable.
}
}
|
1,477 | 0 |
// TODO replace with keys for i18n
|
@Override
public String getName() {
return "Manage services";
}
|
DESIGN
| true |
@Override
public String getName() {
return "Manage services";
}
|
@Override
public String getName() {
return "Manage services";
}
|
@Override
public String getName() {
return "Manage services";
}
|
9,670 | 0 |
// Apply heightmap
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
}
|
NONSATD
| true |
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
|
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
|
9,670 | 1 |
//Clamp newHeight within the selection area
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
}
|
NONSATD
| true |
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
|
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
|
9,670 | 2 |
// Offset x,z to be 'real' coordinates
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
}
|
NONSATD
| true |
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
|
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
|
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
|
9,670 | 3 |
// Depending on growing or shrinking we need to start at the bottom or top
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
}
|
NONSATD
| true |
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
|
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
|
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
|
9,670 | 4 |
// Set the top block of the column to be the same type (this might go wrong with rounding)
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
}
|
IMPLEMENTATION
| true |
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
|
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
|
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
|
9,670 | 5 |
// Skip water/lava
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
}
|
NONSATD
| true |
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
|
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
|
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
|
9,670 | 6 |
// Grow -- start from 1 below top replacing airblocks
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
}
|
NONSATD
| true |
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
|
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
|
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
|
9,670 | 7 |
// Fill rest with air
|
public int applyLayers(int[] data) {
checkNotNull(data);
BlockVector3 minY = region.getMinimumPoint();
int originX = minY.getBlockX();
int originZ = minY.getBlockZ();
int maxY = region.getMaximumPoint().getBlockY();
BlockState fillerAir = BlockTypes.AIR.getDefaultState();
int blocksChanged = 0;
BlockStateHolder<BlockState> tmpBlock = BlockTypes.AIR.getDefaultState();
int maxY4 = maxY << 4;
int index = 0;
// Apply heightmap
for (int z = 0; z < height; ++z) {
int zr = z + originZ;
for (int x = 0; x < width; ++x) {
if (this.invalid != null && this.invalid[index]) {
continue;
}
int curHeight = this.data[index];
//Clamp newHeight within the selection area
int newHeight = Math.min(maxY4, data[index++]);
int curBlock = (curHeight) >> 4;
int newBlock = (newHeight + 15) >> 4;
// Offset x,z to be 'real' coordinates
int xr = x + originX;
// Depending on growing or shrinking we need to start at the bottom or top
if (newHeight > curHeight) {
// Set the top block of the column to be the same type (this might go wrong with rounding)
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
// Skip water/lava
if (existing.getBlockType().getMaterial().isMovementBlocker()) {
// Grow -- start from 1 below top replacing airblocks
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
}
|
NONSATD
| true |
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
|
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
|
for (int setY = newBlock - 1, getY = curBlock; setY >= curBlock; --setY, getY--) {
BlockStateHolder<BlockState> get = session.getBlock(xr, getY, zr);
if (get != BlockTypes.AIR.getDefaultState()) {
tmpBlock = get;
}
session.setBlock(xr, setY, zr, tmpBlock);
++blocksChanged;
}
int setData = newHeight & 15;
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
++blocksChanged;
}
}
} else if (curHeight > newHeight) {
// Fill rest with air
for (int y = newBlock + 1; y <= ((curHeight + 15) >> 4); ++y) {
session.setBlock(xr, y, zr, fillerAir);
++blocksChanged;
}
// Set the top block of the column to be the same type
// (this could otherwise go wrong with rounding)
int setData = newHeight & 15;
BlockStateHolder<BlockState> existing = session.getBlock(xr, curBlock, zr);
if (setData != 0) {
existing = PropertyGroup.LEVEL.set(existing, setData - 1);
session.setBlock(xr, newBlock, zr, existing);
} else {
existing = PropertyGroup.LEVEL.set(existing, 15);
session.setBlock(xr, newBlock, zr, existing);
}
++blocksChanged;
}
}
}
return blocksChanged;
|