Created an API to fetch remote store metadata #18257

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open · wants to merge 1 commit into base: main
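
The diffs below add the action type, transport and REST registration, and an integration test for the new remote store metadata API. As a quick orientation, here is a minimal sketch of how the API is invoked from the Java client, based on the `prepareRemoteStoreMetadata(index, shards)` builder and `groupByIndexAndShards()` accessor exercised in the integration test below; the index name and the `Client` import path are assumptions and may differ by setup and OpenSearch version.

```java
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadataResponse;
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreShardMetadata;
import org.opensearch.client.Client; // assumption: the Client package may differ across OpenSearch versions

import java.util.List;
import java.util.Map;

public class RemoteStoreMetadataExample {
    public static void printLatestMetadataFiles(Client client) {
        // Passing null for the shards argument asks for every shard of the index,
        // mirroring the calls made in the integration test below.
        RemoteStoreMetadataResponse response = client.admin()
            .cluster()
            .prepareRemoteStoreMetadata("my-index", null) // "my-index" is a hypothetical index name
            .get();

        // The response groups per-shard metadata by index name and shard id.
        Map<String, Map<Integer, List<RemoteStoreShardMetadata>>> grouped = response.groupByIndexAndShards();
        grouped.forEach((index, shardMap) -> shardMap.forEach((shardId, metadataList) -> {
            for (RemoteStoreShardMetadata metadata : metadataList) {
                System.out.println(index + "[" + shardId + "] latest segment metadata file: "
                    + metadata.getLatestSegmentMetadataFileName());
            }
        }));
    }
}
```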
@@ -0,0 +1,261 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/

package org.opensearch.remotestore;

import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadataResponse;
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreShardMetadata;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.plugins.Plugin;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.transport.MockTransportService.TestPlugin;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.hasKey;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteStoreMetadataIT extends RemoteStoreBaseIntegTestCase {

private static final String INDEX_NAME = "remote-store-meta-api-test";

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Stream.concat(super.nodePlugins().stream(), Stream.of(TestPlugin.class)).collect(Collectors.toList());
}

public void setup() {
internalCluster().startNodes(3);
}

@SuppressWarnings("unchecked")
public void testMetadataResponseFromAllNodes() {
setup();

createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 3));
ensureGreen(INDEX_NAME);
indexDocs();
client().admin().indices().prepareRefresh(INDEX_NAME).get();

ClusterState state = getClusterState();
List<String> nodes = state.nodes().getNodes().values().stream().map(DiscoveryNode::getName).collect(Collectors.toList());

for (String node : nodes) {
RemoteStoreMetadataResponse response = client(node).admin().cluster().prepareRemoteStoreMetadata(INDEX_NAME, null).get();
assertTrue(response.getSuccessfulShards() > 0);
assertNotNull(response.groupByIndexAndShards());

response.groupByIndexAndShards().forEach((index, shardMap) -> {
shardMap.forEach((shardId, metadataList) -> {
assertFalse(metadataList.isEmpty());

for (RemoteStoreShardMetadata metadata : metadataList) {
assertEquals(index, metadata.getIndexName());
assertEquals((int) shardId, metadata.getShardId());

assertNotNull(metadata.getLatestSegmentMetadataFileName());
assertNotNull(metadata.getLatestTranslogMetadataFileName());

Map<String, Map<String, Object>> segmentFiles = metadata.getSegmentMetadataFiles();
assertNotNull(segmentFiles);
assertFalse(segmentFiles.isEmpty());

for (Map<String, Object> fileMeta : segmentFiles.values()) {
Map<String, Object> files = (Map<String, Object>) fileMeta.get("files");
assertNotNull(files);
assertFalse(files.isEmpty());
for (Object value : files.values()) {
Map<String, Object> meta = (Map<String, Object>) value;
assertThat(meta, allOf(hasKey("original_name"), hasKey("checksum"), hasKey("length")));
}

Map<String, Object> checkpoint = (Map<String, Object>) fileMeta.get("replication_checkpoint");
assertNotNull(checkpoint);
assertThat(
checkpoint,
allOf(
hasKey("primary_term"),
hasKey("segments_gen"),
hasKey("segment_infos_version"),
hasKey("codec"),
hasKey("created_timestamp")
)
);
}

Map<String, Map<String, Object>> translogFiles = metadata.getTranslogMetadataFiles();
assertNotNull(translogFiles);
assertFalse(translogFiles.isEmpty());
for (Map<String, Object> translogMeta : translogFiles.values()) {
assertThat(
translogMeta,
allOf(
hasKey("primary_term"),
hasKey("generation"),
hasKey("min_translog_gen"),
hasKey("generation_to_primary_term")
)
);
}
}
});
});
}
}

@SuppressWarnings("unchecked")
public void testMetadataResponseAllShards() throws Exception {
setup();

createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 2));
ensureGreen(INDEX_NAME);
indexDocs();
client().admin().indices().prepareRefresh(INDEX_NAME).get();

assertBusy(() -> { assertFalse(client().admin().cluster().prepareHealth(INDEX_NAME).get().isTimedOut()); });

RemoteStoreMetadataResponse response = client().admin().cluster().prepareRemoteStoreMetadata(INDEX_NAME, null).get();

response.groupByIndexAndShards().forEach((index, shardMap) -> {
shardMap.forEach((shardId, metadataList) -> {
assertFalse(metadataList.isEmpty());

for (RemoteStoreShardMetadata metadata : metadataList) {
assertEquals(index, metadata.getIndexName());
assertEquals((int) shardId, metadata.getShardId());

assertNotNull(metadata.getLatestSegmentMetadataFileName());
assertNotNull(metadata.getLatestTranslogMetadataFileName());

Map<String, Map<String, Object>> segmentFiles = metadata.getSegmentMetadataFiles();
assertNotNull(segmentFiles);
assertFalse(segmentFiles.isEmpty());

for (Map<String, Object> fileMeta : segmentFiles.values()) {
Map<String, Object> files = (Map<String, Object>) fileMeta.get("files");
assertNotNull(files);
assertFalse(files.isEmpty());
for (Object value : files.values()) {
Map<String, Object> meta = (Map<String, Object>) value;
assertThat(meta, allOf(hasKey("original_name"), hasKey("checksum"), hasKey("length")));
}

Map<String, Object> checkpoint = (Map<String, Object>) fileMeta.get("replication_checkpoint");
assertNotNull(checkpoint);
assertThat(
checkpoint,
allOf(
hasKey("primary_term"),
hasKey("segments_gen"),
hasKey("segment_infos_version"),
hasKey("codec"),
hasKey("created_timestamp")
)
);
}

Map<String, Map<String, Object>> translogFiles = metadata.getTranslogMetadataFiles();
assertNotNull(translogFiles);
assertFalse(translogFiles.isEmpty());
for (Map<String, Object> translogMeta : translogFiles.values()) {
assertThat(
translogMeta,
allOf(
hasKey("primary_term"),
hasKey("generation"),
hasKey("min_translog_gen"),
hasKey("generation_to_primary_term")
)
);
}
}
});
});
}

public void testMultipleMetadataFilesPerShard() throws Exception {
setup();

createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 1));
ensureGreen(INDEX_NAME);

int refreshCount = 5;
for (int i = 0; i < refreshCount; i++) {
indexDocs();
client().admin().indices().prepareRefresh(INDEX_NAME).get();
Thread.sleep(100);
}

RemoteStoreMetadataResponse response = client().admin().cluster().prepareRemoteStoreMetadata(INDEX_NAME, null).get();

response.groupByIndexAndShards().forEach((index, shardMap) -> {
shardMap.forEach((shardId, metadataList) -> {
assertFalse(metadataList.isEmpty());

for (RemoteStoreShardMetadata metadata : metadataList) {
assertEquals(refreshCount, metadata.getSegmentMetadataFiles().size());
assertTrue(metadata.getTranslogMetadataFiles().size() >= 1);
}
});
});
}

public void testMetadataResponseMultipleIndicesAndShards() throws Exception {
setup();

String index1 = INDEX_NAME + "-1";
String index2 = INDEX_NAME + "-2";

createIndex(index1, remoteStoreIndexSettings(0, 2));
createIndex(index2, remoteStoreIndexSettings(0, 3));
ensureGreen(index1, index2);

indexDocs(index1);
indexDocs(index2);

client().admin().indices().prepareRefresh(index1).get();
client().admin().indices().prepareRefresh(index2).get();

RemoteStoreMetadataResponse response = client().admin().cluster().prepareRemoteStoreMetadata("*", null).get();

Map<String, Map<Integer, List<RemoteStoreShardMetadata>>> grouped = response.groupByIndexAndShards();

assertTrue(grouped.containsKey(index1));
assertTrue(grouped.containsKey(index2));

grouped.forEach((index, shardMap) -> {
shardMap.forEach((shardId, metadataList) -> {
assertFalse(metadataList.isEmpty());
metadataList.forEach(metadata -> {
assertEquals(index, metadata.getIndexName());
assertEquals((int) shardId, metadata.getShardId());
assertNotNull(metadata.getSegmentMetadataFiles());
assertFalse(metadata.getSegmentMetadataFiles().isEmpty());
assertNotNull(metadata.getTranslogMetadataFiles());
assertFalse(metadata.getTranslogMetadataFiles().isEmpty());
});
});
});
}

private void indexDocs() {
indexDocs(INDEX_NAME);
}

private void indexDocs(String indexName) {
for (int i = 0; i < randomIntBetween(10, 20); i++) {
client().prepareIndex(indexName).setId("doc-" + i).setSource("field", "value-" + i).get();
}
}
}
@@ -67,6 +67,8 @@
import org.opensearch.action.admin.cluster.node.usage.TransportNodesUsageAction;
import org.opensearch.action.admin.cluster.remote.RemoteInfoAction;
import org.opensearch.action.admin.cluster.remote.TransportRemoteInfoAction;
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadataAction;
import org.opensearch.action.admin.cluster.remotestore.metadata.TransportRemoteStoreMetadataAction;
import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreAction;
import org.opensearch.action.admin.cluster.remotestore.restore.TransportRestoreRemoteStoreAction;
import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsAction;
@@ -378,6 +380,7 @@
import org.opensearch.rest.action.admin.cluster.RestPutStoredScriptAction;
import org.opensearch.rest.action.admin.cluster.RestReloadSecureSettingsAction;
import org.opensearch.rest.action.admin.cluster.RestRemoteClusterInfoAction;
import org.opensearch.rest.action.admin.cluster.RestRemoteStoreMetadataAction;
import org.opensearch.rest.action.admin.cluster.RestRemoteStoreStatsAction;
import org.opensearch.rest.action.admin.cluster.RestRestoreRemoteStoreAction;
import org.opensearch.rest.action.admin.cluster.RestRestoreSnapshotAction;
@@ -638,6 +641,7 @@ public <Request extends ActionRequest, Response extends ActionResponse> void reg
actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
actions.register(WlmStatsAction.INSTANCE, TransportWlmStatsAction.class);
actions.register(RemoteStoreStatsAction.INSTANCE, TransportRemoteStoreStatsAction.class);
actions.register(RemoteStoreMetadataAction.INSTANCE, TransportRemoteStoreMetadataAction.class);
actions.register(NodesUsageAction.INSTANCE, TransportNodesUsageAction.class);
actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class);
@@ -1053,6 +1057,7 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {
registerHandler.accept(new RestGetDecommissionStateAction());
registerHandler.accept(new RestRemoteStoreStatsAction());
registerHandler.accept(new RestRestoreRemoteStoreAction());
registerHandler.accept(new RestRemoteStoreMetadataAction());

// pull-based ingestion API
registerHandler.accept(new RestPauseIngestionAction());
@@ -0,0 +1,25 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/

package org.opensearch.action.admin.cluster.remotestore.metadata;

import org.opensearch.action.ActionType;

/**
* Action to fetch metadata from the remote store
*
* @opensearch.internal
*/
public class RemoteStoreMetadataAction extends ActionType<RemoteStoreMetadataResponse> {
public static final RemoteStoreMetadataAction INSTANCE = new RemoteStoreMetadataAction();
public static final String NAME = "cluster:admin/remote_store/metadata";

private RemoteStoreMetadataAction() {
super(NAME, RemoteStoreMetadataResponse::new);
}
}
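
Since `RemoteStoreMetadataAction` is a standard `ActionType`, the registered transport action can also be reached by executing the action instance directly on a client, rather than through the request builder used in the tests. A hedged sketch, assuming a node `Client` and the `ActionListener` package of recent OpenSearch versions (both import paths may vary):

```java
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadataAction;
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadataRequest;
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadataResponse;
import org.opensearch.client.Client; // assumption: package may differ across versions
import org.opensearch.core.action.ActionListener; // assumption: package may differ across versions

public class RemoteStoreMetadataActionExample {
    public static void fetch(Client client) {
        // Scope the broadcast request to one index and one shard; indices(...) comes from BroadcastRequest.
        RemoteStoreMetadataRequest request = new RemoteStoreMetadataRequest().shards("0");
        request.indices("my-index"); // hypothetical index name

        // client.execute(...) routes the request to the TransportRemoteStoreMetadataAction registered in ActionModule.
        client.execute(RemoteStoreMetadataAction.INSTANCE, request, new ActionListener<>() {
            @Override
            public void onResponse(RemoteStoreMetadataResponse response) {
                System.out.println("successful shards: " + response.getSuccessfulShards());
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        });
    }
}
```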
@@ -0,0 +1,51 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/

package org.opensearch.action.admin.cluster.remotestore.metadata;

import org.opensearch.action.support.broadcast.BroadcastRequest;
import org.opensearch.common.annotation.ExperimentalApi;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;

import java.io.IOException;

/**
* Request object for fetching remote store metadata of shards across one or more indices.
*
* @opensearch.internal
*/
@ExperimentalApi
public class RemoteStoreMetadataRequest extends BroadcastRequest<RemoteStoreMetadataRequest> {
private String[] shards;

public RemoteStoreMetadataRequest() {
super((String[]) null);
shards = new String[0];
}

public RemoteStoreMetadataRequest(StreamInput in) throws IOException {
super(in);
shards = in.readStringArray();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(shards);
}

public RemoteStoreMetadataRequest shards(String... shards) {
this.shards = shards;
return this;
}

public String[] shards() {
return this.shards;
}
}
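
The request carries its shard filter over the wire via `writeTo` and the `StreamInput` constructor above. A minimal round-trip sketch, assuming `BytesStreamOutput` from `org.opensearch.common.io.stream` as the output counterpart of the core `StreamInput` used here:

```java
import org.opensearch.action.admin.cluster.remotestore.metadata.RemoteStoreMetadataRequest;
import org.opensearch.common.io.stream.BytesStreamOutput; // assumption: package may differ across versions
import org.opensearch.core.common.io.stream.StreamInput;

public class RemoteStoreMetadataRequestWireExample {
    public static void roundTrip() throws Exception {
        RemoteStoreMetadataRequest original = new RemoteStoreMetadataRequest().shards("0", "1");
        original.indices("my-index"); // hypothetical index name; indices(...) comes from BroadcastRequest

        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // super.writeTo(out) serializes the broadcast fields, then the shards array is appended.
            original.writeTo(out);

            // The StreamInput constructor reads the fields back in the same order.
            try (StreamInput in = out.bytes().streamInput()) {
                RemoteStoreMetadataRequest copy = new RemoteStoreMetadataRequest(in);
                System.out.println("shards after round trip: " + copy.shards().length); // prints 2
            }
        }
    }
}
```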