From f7418bcb82a9668128c7bf72c1e25bbe31026fd9 Mon Sep 17 00:00:00 2001 From: Eric Pugh Date: Wed, 27 Nov 2024 07:43:30 -0500 Subject: [PATCH] Code review flagging lots of potential fixes --- .../org/apache/solr/handler/IndexFetcher.java | 109 +++--------------- .../solr/handler/ReplicationHandler.java | 61 +++------- .../solr/handler/TestReplicationHandler.java | 29 ++--- 3 files changed, 44 insertions(+), 155 deletions(-) diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java index 95c4a1be611..a1546f798dc 100644 --- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java +++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java @@ -27,8 +27,6 @@ import static org.apache.solr.handler.ReplicationHandler.CONF_FILES; import static org.apache.solr.handler.ReplicationHandler.FETCH_FROM_LEADER; import static org.apache.solr.handler.ReplicationHandler.LEADER_URL; -import static org.apache.solr.handler.ReplicationHandler.LEGACY_LEADER_URL; -import static org.apache.solr.handler.ReplicationHandler.LEGACY_SKIP_COMMIT_ON_LEADER_VERSION_ZERO; import static org.apache.solr.handler.ReplicationHandler.SIZE; import static org.apache.solr.handler.ReplicationHandler.SKIP_COMMIT_ON_LEADER_VERSION_ZERO; import static org.apache.solr.handler.admin.api.ReplicationAPIBase.CHECKSUM; @@ -53,11 +51,9 @@ import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.FileStore; -import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; @@ -189,8 +185,6 @@ public class IndexFetcher { private Integer soTimeout; - private boolean downloadTlogFiles = false; - private boolean skipCommitOnLeaderVersionZero = true; private boolean clearLocalIndexFirst = false; @@ 
-211,7 +205,7 @@ public static class IndexFetchResult { new IndexFetchResult("Local index commit is already in sync with peer", true, null); public static final IndexFetchResult INDEX_FETCH_FAILURE = - new IndexFetchResult("Fetching lastest index is failed", false, null); + new IndexFetchResult("Fetching latest index is failed", false, null); public static final IndexFetchResult INDEX_FETCH_SUCCESS = new IndexFetchResult("Fetching latest index is successful", true, null); public static final IndexFetchResult LOCK_OBTAIN_FAILED = @@ -226,8 +220,6 @@ public static class IndexFetchResult { public static final IndexFetchResult PEER_INDEX_COMMIT_DELETED = new IndexFetchResult( "No files to download because IndexCommit in peer was deleted", false, null); - public static final IndexFetchResult LOCAL_ACTIVITY_DURING_REPLICATION = - new IndexFetchResult("Local index modification during replication", false, null); public static final IndexFetchResult EXPECTING_NON_LEADER = new IndexFetchResult("Replicating from leader but I'm the shard leader", false, null); public static final IndexFetchResult LEADER_IS_NOT_ACTIVE = @@ -279,17 +271,12 @@ public IndexFetcher( if (fetchFromLeader != null && fetchFromLeader instanceof Boolean) { this.fetchFromLeader = (boolean) fetchFromLeader; } - Object skipCommitOnLeaderVersionZero = - ReplicationHandler.getObjectWithBackwardCompatibility( - initArgs, - SKIP_COMMIT_ON_LEADER_VERSION_ZERO, - LEGACY_SKIP_COMMIT_ON_LEADER_VERSION_ZERO); + Object skipCommitOnLeaderVersionZero = initArgs.get(SKIP_COMMIT_ON_LEADER_VERSION_ZERO); if (skipCommitOnLeaderVersionZero != null && skipCommitOnLeaderVersionZero instanceof Boolean) { this.skipCommitOnLeaderVersionZero = (boolean) skipCommitOnLeaderVersionZero; } - String leaderUrl = - ReplicationHandler.getObjectWithBackwardCompatibility( - initArgs, LEADER_URL, LEGACY_LEADER_URL); + String leaderUrl = (String) initArgs.get(LEADER_URL); + if (leaderUrl == null && !this.fetchFromLeader) throw new 
SolrException( SolrException.ErrorCode.SERVER_ERROR, "'leaderUrl' is required for a follower"); @@ -306,7 +293,7 @@ public IndexFetcher( connTimeout = getParameter(initArgs, HttpClientUtil.PROP_CONNECTION_TIMEOUT, 30000, null); // allow a leader override for tests - you specify this in /replication follower section of - // solrconfig and some test don't want to define this + // solrconfig and some tests don't want to define this soTimeout = Integer.getInteger("solr.indexfetcher.sotimeout", -1); if (soTimeout == -1) { soTimeout = getParameter(initArgs, HttpClientUtil.PROP_SO_TIMEOUT, 120000, null); @@ -415,7 +402,7 @@ IndexFetchResult fetchLatestIndex(boolean forceReplication) } /** - * This command downloads all the necessary files from leader to install a index commit point. + * This command downloads all the necessary files from leader to install an index commit point. * Only changed files are downloaded. It also downloads the conf files (if they are modified). * * @param forceReplication force a replication in all cases @@ -811,8 +798,7 @@ private void cleanup( Directory indexDir, boolean deleteTmpIdxDir, File tmpTlogDir, - boolean successfulInstall) - throws IOException { + boolean successfulInstall) { try { if (!successfulInstall) { try { @@ -1230,7 +1216,7 @@ private static Long getUsableSpace(String dir) { private long getApproxTotalSpaceReqd(long totalSpaceRequired) { long approxTotalSpaceReqd = (long) (totalSpaceRequired * 1.05); // add 5% extra for safety - // we should have an extra of 100MB free after everything is downloaded + // we should have an extra of 100 MB free after everything is downloaded approxTotalSpaceReqd += (100 * 1024 * 1024); return approxTotalSpaceReqd; } @@ -1381,7 +1367,7 @@ private static boolean slowFileExists(Directory dir, String fileName) throws IOE * All the files which are common between leader and follower must have same size and same * checksum else we assume they are not compatible (stale). 
* - * @return true if the index stale and we need to download a fresh copy, false otherwise. + * @return true if the index is stale and we need to download a fresh copy, false otherwise. * @throws IOException if low level io error */ private boolean isIndexStale(Directory dir) throws IOException { @@ -1468,7 +1454,7 @@ private boolean moveIndexFiles(Directory tmpIdxDir, Directory indexDir) { } // copy the segments file last if (segmentsFile != null) { - if (!moveAFile(tmpIdxDir, indexDir, segmentsFile)) return false; + return moveAFile(tmpIdxDir, indexDir, segmentsFile); } return true; } @@ -1526,49 +1512,6 @@ private void copyTmpConfFiles2Conf(Path tmpconfDir) { } } - /** - * The tlog files are moved from the tmp dir to the tlog dir as an atomic filesystem operation. A - * backup of the old directory is maintained. If the directory move fails, it will try to revert - * back the original tlog directory. - */ - private boolean copyTmpTlogFiles2Tlog(File tmpTlogDir) { - Path tlogDir = - FileSystems.getDefault().getPath(solrCore.getUpdateHandler().getUpdateLog().getTlogDir()); - Path backupTlogDir = - FileSystems.getDefault() - .getPath(tlogDir.getParent().toAbsolutePath().toString(), tmpTlogDir.getName()); - - try { - Files.move(tlogDir, backupTlogDir, StandardCopyOption.ATOMIC_MOVE); - } catch (IOException e) { - log.error("Unable to rename: {} to: {}", tlogDir, backupTlogDir, e); - return false; - } - - Path src = - FileSystems.getDefault() - .getPath(backupTlogDir.toAbsolutePath().toString(), tmpTlogDir.getName()); - try { - Files.move(src, tlogDir, StandardCopyOption.ATOMIC_MOVE); - } catch (IOException e) { - log.error("Unable to rename: {} to: {}", src, tlogDir, e); - - // In case of error, try to revert back the original tlog directory - try { - Files.move(backupTlogDir, tlogDir, StandardCopyOption.ATOMIC_MOVE); - } catch (IOException e2) { - // bad, we were not able to revert back the original tlog directory - throw new SolrException( - 
SolrException.ErrorCode.SERVER_ERROR, - "Unable to rename: " + backupTlogDir + " to: " + tlogDir); - } - - return false; - } - - return true; - } - private String getDateAsStr(Date d) { return new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(d); } @@ -1611,23 +1554,6 @@ private Collection> getModifiedConfFiles( return nameVsFile.isEmpty() ? Collections.emptyList() : nameVsFile.values(); } - /** - * This simulates File.delete exception-wise, since this class has some strange behavior with it. - * The only difference is it returns null on success, throws SecurityException on - * SecurityException, otherwise returns Throwable preventing deletion (instead of false), for - * additional information. - */ - static Throwable delete(File file) { - try { - Files.delete(file.toPath()); - return null; - } catch (SecurityException e) { - throw e; - } catch (Throwable other) { - return other; - } - } - static boolean delTree(File dir) { try { org.apache.lucene.util.IOUtils.rm(dir.toPath()); @@ -1743,8 +1669,7 @@ private class FileFetcher { Map fileDetails, String saveAs, String solrParamOutput, - long latestGen) - throws IOException { + long latestGen) { this.file = file; this.fileName = (String) fileDetails.get(NAME); this.size = (Long) fileDetails.get(SIZE); @@ -1794,7 +1719,7 @@ private void fetch() throws Exception { } } finally { cleanup(); - // if cleanup succeeds . The file is downloaded fully. do an fsync + // if cleanup succeeds then the file is downloaded fully, do a fsync. fsyncService.execute( () -> { try { @@ -1898,8 +1823,8 @@ private int fetchPackets(FastInputStream fis) throws Exception { } /** - * The webcontainer flushes the data only after it fills the buffer size. So, all data has to be - * read as readFully() other wise it fails. So read everything as bytes and then extract an + * The web container flushes the data only after it fills the buffer size. So, all data has to + * be read as readFully() otherwise it fails. 
So read everything as bytes and then extract an * integer out of it */ private int readInt(byte[] b) { @@ -1966,7 +1891,7 @@ private FastInputStream getStream() throws IOException { } // wt=filestream this is a custom protocol params.set(CommonParams.WT, FILE_STREAM); - // This happen if there is a failure there is a retry. the offset= ensures + // This happens if there is a failure there is a retry. the offset= ensures // that the server starts from the offset if (bytesDownloaded > 0) { params.set(OFFSET, Long.toString(bytesDownloaded)); @@ -2059,16 +1984,14 @@ protected class DirectoryFileFetcher extends FileFetcher { } private static class LocalFsFile implements FileInterface { - private File copy2Dir; FileChannel fileChannel; private FileOutputStream fileOutputStream; File file; LocalFsFile(File dir, String saveAs) throws IOException { - this.copy2Dir = dir; - this.file = new File(copy2Dir, saveAs); + this.file = new File(dir, saveAs); File parentDir = this.file.getParentFile(); if (!parentDir.exists()) { diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java index f22e68246e1..37d0d0526bc 100644 --- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java @@ -42,7 +42,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Date; @@ -154,8 +153,6 @@ public class ReplicationHandler extends RequestHandlerBase private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); SolrCore core; - private volatile boolean closed = false; - @Override public Name getPermissionName(AuthorizationContext request) { return Name.READ_PERM; @@ -228,8 +225,6 @@ public String toString() { private volatile long executorStartTime; - private int 
numTimesReplicated = 0; - private final Map confFileInfoCache = new HashMap<>(); private Long reserveCommitDuration = readIntervalMs("00:00:10"); @@ -285,9 +280,9 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw rsp, coreReplicationAPI.fetchFileList(Long.parseLong(solrParams.required().get(GENERATION)))); } else if (command.equalsIgnoreCase(CMD_BACKUP)) { - doSnapShoot(new ModifiableSolrParams(solrParams), rsp, req); + doSnapShoot(new ModifiableSolrParams(solrParams), rsp); } else if (command.equalsIgnoreCase(CMD_RESTORE)) { - restore(new ModifiableSolrParams(solrParams), rsp, req); + restore(new ModifiableSolrParams(solrParams), rsp); } else if (command.equalsIgnoreCase(CMD_RESTORE_STATUS)) { populateRestoreStatus(rsp); } else if (command.equalsIgnoreCase(CMD_DELETE_BACKUP)) { @@ -324,8 +319,7 @@ public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throw * @see IndexFetcher.LocalFsFileFetcher * @see IndexFetcher.DirectoryFileFetcher */ - private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQueryRequest req) - throws IOException { + private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQueryRequest req) { final CoreReplication coreReplicationAPI = new CoreReplication(core, req, rsp); String fileName; String dirType; @@ -366,16 +360,6 @@ private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQue solrParams.getLong(GENERATION)); } - @SuppressWarnings("unchecked") - public static T getObjectWithBackwardCompatibility( - NamedList params, String preferredKey, String alternativeKey) { - Object value = params.get(preferredKey); - if (value != null) { - return (T) value; - } - return (T) params.get(alternativeKey); - } - private void reportErrorOnResponse(SolrQueryResponse response, String message, Exception e) { response.add(STATUS, ERR_STATUS); response.add(MESSAGE, message); @@ -512,8 +496,7 @@ boolean isReplicating() { return 
indexFetchLock.isLocked(); } - private void restore(SolrParams params, SolrQueryResponse rsp, SolrQueryRequest req) - throws IOException { + private void restore(SolrParams params, SolrQueryResponse rsp) throws IOException { if (restoreFuture != null && !restoreFuture.isDone()) { throw new SolrException( ErrorCode.BAD_REQUEST, @@ -621,7 +604,7 @@ private void populateCommitInfo(SolrQueryResponse rsp) { rsp.add(STATUS, OK_STATUS); } - private void doSnapShoot(SolrParams params, SolrQueryResponse rsp, SolrQueryRequest req) { + private void doSnapShoot(SolrParams params, SolrQueryResponse rsp) { try { int numberToKeep = params.getInt(NUMBER_BACKUPS_TO_KEEP_REQUEST_PARAM, 0); String location = params.get(CoreAdminParams.BACKUP_LOCATION); @@ -819,14 +802,6 @@ private Date getNextScheduledExecTime() { return nextTime; } - int getTimesReplicatedSinceStartup() { - return numTimesReplicated; - } - - void setTimesReplicatedSinceStartup() { - numTimesReplicated++; - } - @Override public Category getCategory() { return Category.REPLICATION; @@ -1062,7 +1037,7 @@ private NamedList getReplicationDetails( follower.add("replicationStartTime", replicationStartTimeStamp.toString()); } long elapsed = fetcher.getReplicationTimeElapsed(); - follower.add("timeElapsed", String.valueOf(elapsed) + "s"); + follower.add("timeElapsed", elapsed + "s"); if (bytesDownloaded > 0) estimatedTimeRemaining = @@ -1078,7 +1053,7 @@ private NamedList getReplicationDetails( follower.add("currentFileSizePercent", String.valueOf(percentDownloaded)); follower.add("bytesDownloaded", NumberUtils.readableSize(bytesDownloaded)); follower.add("totalPercent", String.valueOf(totalPercent)); - follower.add("timeRemaining", String.valueOf(estimatedTimeRemaining) + "s"); + follower.add("timeRemaining", estimatedTimeRemaining + "s"); follower.add("downloadSpeed", NumberUtils.readableSize(downloadSpeed)); } catch (Exception e) { log.error("Exception while writing replication details: ", e); @@ -1133,7 +1108,7 @@ 
private Object formatVal(String key, Properties props, Class clzz) { return null; } } else if (clzz == List.class) { - String ss[] = s.split(","); + String[] ss = s.split(","); List l = new ArrayList<>(); for (String s1 : ss) { l.add(new Date(Long.parseLong(s1)).toString()); @@ -1291,11 +1266,11 @@ public void inform(SolrCore core) { if (enableLeader) { includeConfFiles = (String) leader.get(CONF_FILES); if (includeConfFiles != null && includeConfFiles.trim().length() > 0) { - List files = Arrays.asList(includeConfFiles.split(",")); + String[] files = includeConfFiles.split(","); for (String file : files) { if (file.trim().length() == 0) continue; String[] strs = file.trim().split(":"); - // if there is an alias add it or it is null + // if there is an alias add it, or it is null confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null); } log.info("Replication enabled for following config files: {}", includeConfFiles); @@ -1366,7 +1341,7 @@ public void inform(SolrCore core) { } } - // ensure the writer is init'd so that we have a list of commit points + // ensure the writer is initialized so that we have a list of commit points RefCounted iw = core.getUpdateHandler().getSolrCoreState().getIndexWriter(core); iw.decref(); @@ -1526,7 +1501,6 @@ private Long readIntervalNs(String interval) { String hr = m.group(1); String min = m.group(2); String sec = m.group(3); - result = 0; try { if (sec != null && sec.length() > 0) result += Integer.parseInt(sec); if (min != null && min.length() > 0) result += (60 * Integer.parseInt(min)); @@ -1551,20 +1525,13 @@ private Long readIntervalNs(String interval) { public static final String FETCH_FROM_LEADER = "fetchFromLeader"; // in case of TLOG replica, if leaderVersion = zero, don't do commit - // otherwise updates from current tlog won't copied over properly to the new tlog, leading to data + // otherwise updates from current tlog won't be copied over properly to the new tlog, leading to + // data // loss // don't commit 
on leader version zero for PULL replicas as PULL should only get its index // state from leader public static final String SKIP_COMMIT_ON_LEADER_VERSION_ZERO = "skipCommitOnLeaderVersionZero"; - /** - * @deprecated Only used for backwards compatibility. Use {@link - * #SKIP_COMMIT_ON_LEADER_VERSION_ZERO} - */ - @Deprecated - public static final String LEGACY_SKIP_COMMIT_ON_LEADER_VERSION_ZERO = - "skipCommitOnMasterVersionZero"; - public static final String MESSAGE = "message"; public static final String COMMAND = "command"; @@ -1603,8 +1570,6 @@ private Long readIntervalNs(String interval) { public static final String ALIAS = "alias"; - public static final String CONF_CHECKSUM = "confchecksum"; - public static final String CONF_FILES = "confFiles"; public static final String REPLICATE_AFTER = "replicateAfter"; diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java index 5ceb3ee5108..5a08a59b525 100644 --- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java +++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java @@ -462,7 +462,7 @@ public void doTestReplicateAfterWrite2Follower() throws Exception { index(followerClient, "id", 555, "name", "name = " + 555); followerClient.commit(true, true); - // this doc is added to follower so it should show an item w/ that result + // this doc is added to follower, so it should show an item w/ that result assertEquals(1, numFound(rQuery(1, "id:555", followerClient))); // Let's fetch the index rather than rely on the polling. 
@@ -531,7 +531,7 @@ public void doTestIndexAndConfigReplication() throws Exception { followerJetty.stop(); - // setup an sub directory /foo/ in order to force subdir file replication + // set up a subdirectory /foo/ in order to force subdirectory file replication File leaderFooDir = new File(leader.getConfDir() + File.separator + "foo"); File leaderBarFile = new File(leaderFooDir, "bar.txt"); assertTrue("could not make dir " + leaderFooDir, leaderFooDir.mkdirs()); @@ -599,8 +599,8 @@ public void doTestStopPoll() throws Exception { // get docs from leader and check if number is equal to leader assertEquals(nDocs + 1, numFound(rQuery(nDocs + 1, "*:*", leaderClient))); - // NOTE: this test is wierd, we want to verify it DOESNT replicate... - // for now, add a sleep for this.., but the logic is wierd. + // NOTE: this test is weird, we want to verify it DOESN'T replicate... + // for now, add a sleep for this..., but the logic is weird. Thread.sleep(3000); // get docs from follower and check if number is not equal to leader; polling is disabled @@ -1002,7 +1002,7 @@ private void checkForSingleIndex(JettySolrRunner jetty, boolean afterReload) { CoreContainer cores = jetty.getCoreContainer(); Collection theCores = cores.getCores(); for (SolrCore core : theCores) { - String ddir = core.getDataDir(); + String dataDir = core.getDataDir(); CachingDirectoryFactory dirFactory = getCachingDirectoryFactory(core); synchronized (dirFactory) { Set livePaths = dirFactory.getLivePaths(); @@ -1019,22 +1019,23 @@ private void checkForSingleIndex(JettySolrRunner jetty, boolean afterReload) { // :TODO: assert that one of the paths is a subpath of hte other } if (dirFactory instanceof StandardDirectoryFactory) { - System.out.println(Arrays.asList(new File(ddir).list())); + System.out.println(Arrays.asList(new File(dataDir).list())); // we also allow one extra index dir - it may not be removed until the core is closed - int cnt = indexDirCount(ddir); + int cnt = indexDirCount(dataDir); // if 
after reload, there may be 2 index dirs while the reloaded SolrCore closes. if (afterReload) { - assertTrue("found:" + cnt + Arrays.asList(new File(ddir).list()), 1 == cnt || 2 == cnt); + assertTrue( + "found:" + cnt + Arrays.asList(new File(dataDir).list()), 1 == cnt || 2 == cnt); } else { - assertEquals("found:" + cnt + Arrays.asList(new File(ddir).list()), 1, cnt); + assertEquals("found:" + cnt + Arrays.asList(new File(dataDir).list()), 1, cnt); } } } } - private int indexDirCount(String ddir) { + private int indexDirCount(String dir) { String[] list = - new File(ddir) + new File(dir) .list( new FilenameFilter() { @Override @@ -1586,7 +1587,7 @@ public void testEmptyBackups() throws Exception { index(leaderClient, "id", "1", "name", "foo"); - { // second backup w/uncommited doc + { // second backup w/ uncommitted doc final String backupName = "empty_backup2"; final GenericSolrRequest req = new GenericSolrRequest( @@ -1698,7 +1699,7 @@ private Date watchCoreStartAt(JettySolrRunner jettySolrRunner, final Date min) return startTime; } } catch (SolrException e) { - // workarround for SOLR-4668 + // workaround for SOLR-4668 if (500 != e.code()) { throw e; } // else server possibly from the core reload in progress... @@ -1708,7 +1709,7 @@ private Date watchCoreStartAt(JettySolrRunner jettySolrRunner, final Date min) Thread.sleep(sleepInterval); } fail("timed out waiting for collection1 startAt time to exceed: " + min); - return min; // compilation neccessity + return min; // compilation necessity } }