Commit
chore(store): translate CJK comments to English for store-cli, store-client, store-common, store-core (#2623)

* fixup Chinese punctuation
VGalaxies authored Aug 10, 2024
1 parent 2e27c58 commit c231a6f
Showing 56 changed files with 456 additions and 456 deletions.
@@ -52,7 +52,7 @@ public static void main(String[] args) {
@Override
public void run(String... args) throws IOException, InterruptedException, PDException {
if (args.length <= 0) {
log.warn("参数类型 cmd[-load, -query, -scan]");
log.warn("Parameter type cmd[-load, -query, -scan]");
} else {
switch (args[0]) {
case "-load":
@@ -80,7 +80,7 @@ public void run(String... args) throws IOException, InterruptedException, PDExce
break;
case "-scan":
if (args.length < 4) {
log.warn("参数类型 -scan pd graphName tableName");
log.warn("Parameter type -scan pd graphName tableName");
} else {
doScan(args[1], args[2], args[3]);
}
@@ -94,7 +94,7 @@ public void run(String... args) throws IOException, InterruptedException, PDExce
scanner.getDataSingle();
break;
default:
log.warn("参数类型错误,未执行任何程序");
log.warn("Parameter type error, no program executed");
}
}
}
@@ -51,13 +51,13 @@
import lombok.extern.slf4j.Slf4j;

/**
* 使用pd,支持raft
* 读取文件并多线程进行入库
* Uses PD and supports Raft.
* Reads files and writes them to the store with multiple threads.
*/
@Slf4j
public class HgThread2DB {

/*正在进行和在排队的任务的总数*/
/* Total number of tasks in progress and in queue */
private static final AtomicInteger taskTotal = new AtomicInteger(0);
private static final AtomicInteger queryTaskTotal = new AtomicInteger(0);
private static final AtomicLong insertDataCount = new AtomicLong();
@@ -204,7 +204,7 @@ public boolean testOrder(String input) {
}

/**
* 多线程读取文件入库
* Multi-threaded file reading and loading into the store
*
* @throws IOException
* @throws InterruptedException
@@ -216,14 +216,14 @@ public void startMultiprocessInsert(String filepath) throws IOException {
MetricX metrics = null;
long dataCount = 0;
if (readfile.exists()) {
// 读取文件
// Read file
InputStreamReader isr = new InputStreamReader(new FileInputStream(readfile),
StandardCharsets.UTF_8);
BufferedReader reader = new BufferedReader(isr);

String strLine = null;
String tableName = HgCliUtil.TABLE_NAME;
// 积攒到多少个后执行线程入库,10万
// Number of records to accumulate before a worker thread writes them to the store: 100,000
int maxlist = 100000;
List<String> keys = new ArrayList<>(maxlist);
metrics = MetricX.ofStart();
@@ -232,7 +232,7 @@ public void startMultiprocessInsert(String filepath) throws IOException {
keys.add(strLine);
dataCount++;

// 读取文件中的10000条数据,启一个线程入库
// After reading 10000 records from the file, start a thread to write them to the store.
if (dataCount % maxlist == 0) {
List<String> finalKeys = keys;
Runnable task = () -> {
@@ -266,7 +266,7 @@

isr.close();
reader.close();
// 把剩余的入库
// Write the remaining records to the store
if (!keys.isEmpty()) {
List<String> finalKeys1 = keys;
Runnable task = () -> {
@@ -299,18 +299,18 @@ public void startMultiprocessInsert(String filepath) throws IOException {
threadPool.shutdown();

} else {
System.out.println("样本文件不存在:" + filepath);
System.out.println("Sample file does not exist: " + filepath);
}
metrics.end();
log.info("*************************************************");
log.info(" 主进程执行时间:" + metrics.past() / 1000 + "秒,一共执行:" + dataCount + "");
log.info(" Main process execution time: " + metrics.past() / 1000 + " seconds, total executed: " + dataCount + " items");
log.info("*************************************************");
System.out.println(" 主进程执行时间 " + metrics.past() / 1000 + "");
System.out.println("-----主进程执行结束---------");
System.out.println(" Main process execution time " + metrics.past() / 1000 + " seconds");
System.out.println("-----Main process execution ends---------");
}
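The pattern translated in these hunks — accumulate records into a list, hand each full batch to a worker thread, and finally flush the remainder — can be sketched as follows. This is a minimal illustration; BATCH_SIZE, writeBatch, and the pool size are assumptions, not the actual store-cli API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class BatchLoaderSketch {
    static final int BATCH_SIZE = 100_000; // flush threshold named in the comments above
    final ExecutorService pool = Executors.newFixedThreadPool(8); // assumed pool size

    void load(Iterable<String> lines) {
        List<String> batch = new ArrayList<>(BATCH_SIZE);
        for (String line : lines) {
            batch.add(line);
            if (batch.size() == BATCH_SIZE) {
                final List<String> full = batch;
                pool.execute(() -> writeBatch(full)); // one worker per full batch
                batch = new ArrayList<>(BATCH_SIZE);
            }
        }
        if (!batch.isEmpty()) {
            final List<String> rest = batch;
            pool.execute(() -> writeBatch(rest)); // write the remaining records
        }
        pool.shutdown();
    }

    void writeBatch(List<String> batch) {
        // hypothetical store write; the real code goes through an HgStoreSession
    }
}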

/**
* 多线程读取文件入库
* Multi-threaded file reading and loading into the store
*
* @throws IOException
* @throws InterruptedException
@@ -324,7 +324,7 @@ public void autoMultiprocessInsert() throws IOException {

String strLine = null;
String tableName = HgCliUtil.TABLE_NAME;
// 积攒到多少个后执行线程入库,10万
// Number of records to accumulate before a worker thread writes them to the store: 100,000
int maxlist = 100000;
List<String> keys = new ArrayList<>(maxlist);
for (int x = 0; x < 10000000; x++) {
@@ -371,20 +371,20 @@

metrics.end();
log.info("*************************************************");
log.info(" 主进程执行时间:" + metrics.past() / 1000 + "秒,一共执行:" + dataCount + "");
log.info(" Main process execution time: " + metrics.past() / 1000 + " seconds, total executed: " + dataCount + " items");
log.info("*************************************************");
System.out.println(" 主进程执行时间 " + metrics.past() / 1000 + "");
System.out.println("-----主进程执行结束---------");
System.out.println(" Main process execution time " + metrics.past() / 1000 + " seconds");
System.out.println("-----Main process ends---------");
}

public String getLong() {
//如果需要更长 或者更大冗余空间, 只需要 time * 10^n 即可
//当前可保证1毫秒 生成 10000条不重复
// If a longer ID or more headroom is needed, just multiply time by 10^n
// Currently guarantees 10,000 unique IDs per millisecond.
return String.format("%019x", longId.getAndIncrement());
}
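A minimal sketch of the ID scheme this comment describes: a zero-padded hex counter where one millisecond of timestamp leaves room for 10,000 values. Seeding longId from time * 10^4 is an assumption about initialization; only the %019x formatting is taken from the code.

import java.util.concurrent.atomic.AtomicLong;

class IdSketch {
    // Assumed seeding: timestamp * 10^4 leaves headroom for 10,000 IDs per millisecond.
    static final AtomicLong longId = new AtomicLong(System.currentTimeMillis() * 10_000L);

    static String nextId() {
        return String.format("%019x", longId.getAndIncrement()); // 19 hex digits, zero-padded
    }
}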

/**
* 执行查询,并将查询的结果做为下一次迭代的点放入队列
* Execute a query and enqueue its results as the start points of the next iteration.
*/
private void queryAnd2Queue() {
try {
@@ -409,7 +409,7 @@ private void queryAnd2Queue() {
HgKvEntry entry = iterator.next();
String newPoint = HgCliUtil.toStr(entry.value());
// log.info("query_key =" + newPoint);
// 统计查询次数
// Count the number of queries
if (!newPoint.isEmpty() && hashSet.add(newPoint)) {
queryCount.getAndIncrement();
totalQueryCount.getAndIncrement();
@@ -432,7 +432,7 @@ private void queryAnd2Queue() {
}
}
}
// 达到1万个点后,去查询一次
// After reaching 10,000 points, query once.
if (newQueryList.size() > 10000 && listQueue.size() < 10000) {
listQueue.put(newQueryList);
insertQueueCount++;
@@ -444,7 +444,7 @@ private void queryAnd2Queue() {
}
}
}
// 一次查询如果不够1万,单独提交一次查询,确保所有的结果都能执行查询
// If a batch has fewer than 10,000 points, submit it as a separate query to ensure every result gets queried.
if (!newQueryList.isEmpty() && listQueue.size() < 1000) {
listQueue.put(newQueryList);
}
@@ -459,10 +459,10 @@
}
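Taken together, the comments in this hunk describe an iterative breadth-first expansion: drain a batch of points from the queue, query them, and enqueue the unseen results as the next batch. A rough sketch, with queryValues standing in for the real store scan:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

class QueryLoopSketch {
    final BlockingQueue<List<String>> listQueue = new LinkedBlockingQueue<>();
    final Set<String> seen = new HashSet<>();

    void run() throws InterruptedException {
        List<String> points;
        while ((points = listQueue.poll()) != null) {
            List<String> next = new ArrayList<>();
            for (String value : queryValues(points)) { // hypothetical store lookup
                if (!value.isEmpty() && seen.add(value)) { // count each point only once
                    next.add(value);
                    if (next.size() > 10_000) { // full batch: enqueue for the next round
                        listQueue.put(next);
                        next = new ArrayList<>();
                    }
                }
            }
            if (!next.isEmpty()) { // submit the short remainder as its own query
                listQueue.put(next);
            }
        }
    }

    List<String> queryValues(List<String> points) {
        return new ArrayList<>(); // placeholder for the real scan
    }
}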

/**
* 多线程查询
* Multithreaded query
*
* @param point 起始查询点,后续根据这个点查询到的value做为下一次的查询条件进行迭代
* @param scanCount 允许启动的线程数量
* @param point Starting query point; the values it returns become the query conditions of the next iteration.
* @param scanCount The number of threads allowed to start
* @throws IOException
* @throws InterruptedException
*/
@@ -559,10 +559,10 @@ public HgOwnerKey next() {

metrics.end();
log.info("*************************************************");
log.info(" 主进程执行时间:" + metrics.past() / 1000 + "秒; 查询:" + totalQueryCount.get()
+ "次,qps:" + totalQueryCount.get() * 1000 / metrics.past());
log.info(" Main process execution time: " + metrics.past() / 1000 + " seconds; Queries: " + totalQueryCount.get()
+ "times, qps:" + totalQueryCount.get() * 1000 / metrics.past());
log.info("*************************************************");
System.out.println("-----主进程执行结束---------");
System.out.println("-----Main process ends---------");
}

}
@@ -130,7 +130,7 @@ public void onNext(ScanResponse value) {

@Override
public void onError(Throwable t) {
log.warn("调用grpc接口发生错误", t);
log.warn("Calling grpc interface encountered an error", t);
latch.countDown();
}

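The observer in this hunk reports the failure and releases a CountDownLatch; the calling side presumably blocks on that latch roughly as below (a sketch; the timeout value is an assumption):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

class ScanAwaitSketch {
    void awaitScan(CountDownLatch latch) throws InterruptedException {
        // onError (and presumably onCompleted) call latch.countDown();
        // the caller blocks here until the stream finishes or fails.
        if (!latch.await(30, TimeUnit.SECONDS)) { // hypothetical timeout
            System.err.println("Scan did not complete in time");
        }
    }
}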
@@ -43,8 +43,8 @@ public interface HgKvStore {
boolean put(String table, HgOwnerKey ownerKey, byte[] value);

/**
* 该版本被store内部使用。向分区写入数据,
* partitionId与key.keyCode必须与pd存储的分区信息保持一致。
* This version is used internally by the store. It writes data to a partition;
* partitionId and key.keyCode must be consistent with the partition information stored in PD.
*/
boolean directPut(String table, int partitionId, HgOwnerKey key, byte[] value);

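A hedged reading of the directPut contract stated above: the caller resolves the partition that PD assigns to the key's keyCode before writing. getKeyCode and lookupPartitionId below are hypothetical stand-ins for the real accessors and PD query.

class DirectPutSketch {
    boolean write(HgKvStore store, String graph, HgOwnerKey key, byte[] value) {
        int keyCode = key.getKeyCode();                      // assumed accessor for key.keyCode
        int partitionId = lookupPartitionId(graph, keyCode); // hypothetical PD lookup
        return store.directPut("my_table", partitionId, key, value);
    }

    int lookupPartitionId(String graph, int keyCode) {
        return 0; // placeholder; the real lookup consults PD's partition table
    }
}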
@@ -27,10 +27,10 @@
public final class HgNodePartition {

private final Long nodeId;
//当前key的hashcode
// Current key's hash code
private final Integer keyCode;

//分区的开始结束范围
// Start and end of the partition's range
private final Integer startKey;
private final Integer endKey;
private int hash = -1;
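Given the fields above, a partition owns a keyCode exactly when it falls inside the partition's range; a one-method sketch of that check (the half-open [startKey, endKey) convention is an assumption):

boolean contains(int keyCode) {
    // assumes a half-open [startKey, endKey) range convention
    return keyCode >= startKey && keyCode < endKey;
}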
@@ -48,7 +48,7 @@ public HgStoreNodePartitionerImpl(PDClient pdClient, HgStoreNodeManager nodeMana
}

/**
* 查询分区信息,结果通过HgNodePartitionerBuilder返回
* Query partition information, the result is returned through HgNodePartitionerBuilder.
*/
@Override
public int partition(HgNodePartitionerBuilder builder, String graphName,
@@ -141,7 +141,7 @@ public int partition(HgNodePartitionerBuilder builder, String graphName,
}

/**
* 查询hgstore信息
* Query hgstore information
*
* @return hgstore
*/
@@ -157,7 +157,7 @@ public HgStoreNode apply(String graphName, Long nodeId) {
}

/**
* 通知更新缓存
* Notify to update the cache
*/
@Override
public int notice(String graphName, HgStoreNotice storeNotice) {
@@ -377,11 +377,11 @@ <T> Optional<T> retryingInvoke(Supplier<T> supplier) {
if (i + 1 <= NODE_MAX_RETRYING_TIMES) {
try {
int sleepTime;
// 前三次每隔一秒做一次尝试
// For the first three attempts, retry once per second
if (i < 3) {
sleepTime = 1;
} else {
// 后面逐次递增
// Increase the interval on each subsequent attempt
sleepTime = i - 1;
}
log.info("Waiting {} seconds " +
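The retry logic in this hunk produces the schedule 1s, 1s, 1s, 2s, 3s, ...; condensed into a helper, the sleep computation is equivalent to the method below (a restatement of the diff's rule, not a proposed change):

static int sleepSeconds(int attempt) {
    // attempts 0-2 wait 1 second; later attempts wait attempt - 1 seconds
    return (attempt < 3) ? 1 : attempt - 1; // 1, 1, 1, 2, 3, 4, ...
}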
@@ -692,7 +692,7 @@ public boolean doAction(String table, HgOwnerKey startKey, Integer code,
}

private List<NodeTkv> toNodeTkvList(Builder scanReqBuilder) {
// TODO 使用builder获取owner
// TODO: use builder to get owner
String table = scanReqBuilder.getTable();
HgOwnerKey ownerKey = HgStoreClientConst.ALL_PARTITION_OWNER_KEY;
byte[] allOwner = ownerKey.getOwner();
@@ -199,7 +199,7 @@ KvCloseableIterator<Kv> doBatchScan(HgStoreNodeSession nodeSession, HgScanQuery
return KvBatchScanner5.scan(nodeSession, this.getStub(nodeSession), scanQuery);
}

// 返回多个小的迭代器,允许上层并行处理
// Return multiple small iterators, allowing upper-level parallel processing
KvCloseableIterator<HgKvIterator<HgKvEntry>> doBatchScan3(HgStoreNodeSession nodeSession,
HgScanQuery scanQuery,
KvCloseableIterator iterator) {
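A sketch of how an upper layer might consume the iterator-of-iterators that doBatchScan3 returns, with each inner iterator processed by its own worker; the plain java.util.Iterator signature, the executor, and the process handler are assumptions:

import java.util.Iterator;
import java.util.concurrent.ExecutorService;

class BatchScanConsumerSketch {
    void consume(Iterator<HgKvIterator<HgKvEntry>> outer, ExecutorService executor) {
        while (outer.hasNext()) {
            HgKvIterator<HgKvEntry> part = outer.next();
            executor.execute(() -> {          // one worker per sub-iterator
                while (part.hasNext()) {
                    process(part.next());     // hypothetical per-entry handler
                }
            });
        }
    }

    void process(HgKvEntry entry) {
        // placeholder for application logic
    }
}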
@@ -63,7 +63,7 @@ static Key.Builder getOwnerKeyBuilder() {
Key.Builder builder = keyBuilder.get();
if (builder == null) {
builder = Key.newBuilder();
// TODO 线程级变量,寻找删除时机
// TODO: thread-local variable; determine when to remove it
keyBuilder.set(builder);
}
return builder;
@@ -86,7 +86,7 @@ static Key toKey(HgOwnerKey ownerKey) {
Key.Builder builder = keyBuilder.get();
if (builder == null) {
builder = Key.newBuilder();
// TODO 线程级变量,寻找删除时机
// TODO: thread-local variable; determine when to remove it
keyBuilder.set(builder);
}
return builder
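The get()/null-check/set() sequence in these two hunks is the manual form of a thread-local cache; ThreadLocal.withInitial expresses the same idea in one declaration (shown for comparison only, not as a proposed change to the diff):

static final ThreadLocal<Key.Builder> keyBuilder =
        ThreadLocal.withInitial(Key::newBuilder); // one cached builder per thread
// The TODO still applies: keyBuilder.remove() needs a well-defined call site.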
