Merge remote-tracking branch 'origin/main' into refactor/rounding
Signed-off-by: Ketan Verma <[email protected]>
ketanv3 committed Nov 1, 2023
2 parents 3d4f7ad + aa0fddb commit 3d7a3f4
Showing 2 changed files with 7 additions and 6 deletions.
CHANGELOG.md (2 changes: 1 addition & 1 deletion)
@@ -63,7 +63,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855))
- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/))
- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558))
-- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057))


### Deprecated
@@ -126,6 +125,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273))
- [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999))
- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023))
+- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057))

### Deprecated

@@ -501,8 +501,9 @@ public void testShardRoutingWithNetworkDisruption_FailOpenEnabled() throws Excep

logger.info("--> creating network partition disruption");
final String clusterManagerNode1 = internalCluster().getClusterManagerName();
-Set<String> nodesInOneSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0)).collect(Collectors.toCollection(HashSet::new));
-Set<String> nodesInOtherSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new));
+Set<String> nodesInOneSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new));
+Set<String> nodesInOtherSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0), nodeMap.get("c").get(0))
+    .collect(Collectors.toCollection(HashSet::new));

NetworkDisruption networkDisruption = new NetworkDisruption(
new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide),
@@ -870,8 +871,7 @@ private void assertSearchInAZ(String az) {
SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal();
if (stat.getNode().isDataNode()) {
if (stat.getNode().getId().equals(dataNodeId)) {
-Assert.assertTrue(searchStats.getFetchCount() > 0L);
-Assert.assertTrue(searchStats.getQueryCount() > 0L);
+Assert.assertTrue(searchStats.getFetchCount() > 0L || searchStats.getQueryCount() > 0L);
}
}
}
@@ -945,7 +945,6 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws
}

logger.info("--> network disruption is stopped");
-networkDisruption.stopDisrupting();

for (int i = 0; i < 50; i++) {
try {
@@ -962,6 +961,8 @@ public void testSearchAggregationWithNetworkDisruption_FailOpenEnabled() throws
fail("search should not fail");
}
}
+networkDisruption.stopDisrupting();
+
assertSearchInAZ("b");
assertSearchInAZ("c");
assertNoSearchInAZ("a");
