From 455cbec5262c451b8975ff276b3b0b191326e9e8 Mon Sep 17 00:00:00 2001 From: wuhanqing Date: Fri, 7 Aug 2020 17:48:04 +0800 Subject: [PATCH] zeroize memory copy Change-Id: I2d389b24446623e8f639642a2b354120f20dbe56 --- include/client/libcurve.h | 12 +- nebd/src/part2/file_service.cpp | 34 +- nebd/src/part2/request_executor_curve.cpp | 6 +- nebd/test/part1/nebd_client_unittest.cpp | 2 +- nebd/test/part2/file_service_unittest.cpp | 16 +- nebd/test/part2/mock_curve_client.h | 6 +- .../part2/test_request_executor_curve.cpp | 16 +- src/client/chunk_closure.cpp | 12 +- src/client/client_common.cpp | 50 --- src/client/client_common.h | 23 +- src/client/copyset_client.cpp | 5 +- src/client/copyset_client.h | 5 +- src/client/file_instance.cpp | 8 +- src/client/file_instance.h | 6 +- src/client/io_condition_varaiable.h | 16 +- src/client/io_tracker.cpp | 162 ++++++-- src/client/io_tracker.h | 104 +++-- src/client/iomanager4chunk.cpp | 11 +- src/client/iomanager4file.cpp | 43 +- src/client/iomanager4file.h | 391 +++++++++--------- src/client/libcurve_client.cpp | 10 +- src/client/libcurve_file.cpp | 10 +- src/client/libcurve_file.h | 8 +- src/client/request_context.cpp | 28 +- src/client/request_context.h | 61 ++- src/client/request_scheduler.cpp | 29 +- src/client/request_scheduler.h | 4 +- src/client/request_sender.cpp | 13 +- src/client/request_sender.h | 5 +- src/client/splitor.cpp | 123 +++--- src/client/splitor.h | 22 +- test/chunkserver/clone/clone_copyer_test.cpp | 16 +- test/client/client_metric_test.cpp | 2 +- test/client/client_session_unittest.cpp | 2 +- test/client/copyset_client_test.cpp | 180 ++++---- test/client/fake/BUILD | 1 + test/client/fake/mock_schedule.cpp | 142 +++---- test/client/fake/mock_schedule.h | 8 +- test/client/iotracker_splitor_unittest.cpp | 259 ++++++------ ...cutor_test.cpp => lease_executor_test.cpp} | 9 + test/client/libcurve_client_unittest.cpp | 7 +- test/client/libcurve_interface_unittest.cpp | 52 ++- test/client/mock_file_client.h | 4 +- test/client/mock_request_context.h | 3 + test/client/request_scheduler_test.cpp | 112 ++--- test/client/request_sender_test.cpp | 4 +- 46 files changed, 1074 insertions(+), 968 deletions(-) delete mode 100644 src/client/client_common.cpp rename test/client/{lease_excutor_test.cpp => lease_executor_test.cpp} (86%) diff --git a/include/client/libcurve.h b/include/client/libcurve.h index 652bc12272..f47d7b86c0 100644 --- a/include/client/libcurve.h +++ b/include/client/libcurve.h @@ -380,6 +380,11 @@ namespace client { class FileClient; +enum class UserDataType { + RawBuffer, // char* + IOBuffer // butil::IOBuf* +}; + // 存储用户信息 typedef struct UserInfo { // 当前执行的owner信息 @@ -461,17 +466,20 @@ class CurveClient { * 异步读 * @param fd 文件fd * @param aioctx 异步读写的io上下文 + * @param dataType type of user buffer * @return 返回错误码 */ - virtual int AioRead(int fd, CurveAioContext* aioctx); + virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType); /** * 异步写 * @param fd 文件fd * @param aioctx 异步读写的io上下文 + * @param dataType type of user buffer * @return 返回错误码 */ - virtual int AioWrite(int fd, CurveAioContext* aioctx); + virtual int AioWrite(int fd, CurveAioContext* aioctx, + UserDataType dataType); /** * 测试使用,设置fileclient diff --git a/nebd/src/part2/file_service.cpp b/nebd/src/part2/file_service.cpp index a3fec4dd74..999343d613 100644 --- a/nebd/src/part2/file_service.cpp +++ b/nebd/src/part2/file_service.cpp @@ -23,6 +23,8 @@ #include #include +#include + #include "nebd/src/part2/file_service.h" namespace nebd { @@ -30,22 +32,17 
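// What the new UserDataType parameter buys a caller that already holds a
// butil::IOBuf (as nebd part2 does after this patch): CurveAioContext::buf
// carries an IOBuf* end to end, so no flat staging buffer is allocated or
// copied. A minimal sketch, assuming an opened CurveClient and a
// hypothetical completion callback OnReadDone; neither is code from this
// patch:

#include <butil/iobuf.h>
#include "include/client/libcurve.h"

void OnReadDone(CurveAioContext* ctx);  // assumed completion callback

int SubmitZeroCopyRead(curve::client::CurveClient* client, int fd,
                       CurveAioContext* ctx, butil::IOBuf* out,
                       off_t offset, size_t length) {
    ctx->offset = offset;
    ctx->length = length;
    ctx->cb = OnReadDone;
    ctx->buf = out;  // an IOBuf*, not a char*, hence UserDataType::IOBuffer
    return client->AioRead(fd, ctx, curve::client::UserDataType::IOBuffer);
}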
@@ namespace server { using nebd::client::RetCode; -static void AioReadDeleter(void* m) { - delete[] reinterpret_cast(m); -} - void NebdFileServiceCallback(NebdServerAioContext* context) { CHECK(context != nullptr); std::unique_ptr contextGuard(context); + std::unique_ptr iobufGuard( + reinterpret_cast(context->buf)); brpc::ClosureGuard doneGuard(context->done); switch (context->op) { case LIBAIO_OP::LIBAIO_OP_READ: { nebd::client::ReadResponse* response = dynamic_cast(context->response); - butil::IOBuf readBuf; - readBuf.append_user_data( - context->buf, context->size, AioReadDeleter); if (context->ret < 0) { response->set_retcode(RetCode::kNoOK); LOG(ERROR) << "Read file failed. " @@ -53,7 +50,8 @@ void NebdFileServiceCallback(NebdServerAioContext* context) { } else { brpc::Controller* cntl = dynamic_cast(context->cntl); - cntl->response_attachment().append(readBuf); + cntl->response_attachment() = + *reinterpret_cast(context->buf); response->set_retcode(RetCode::kOK); } break; @@ -69,7 +67,6 @@ void NebdFileServiceCallback(NebdServerAioContext* context) { } else { response->set_retcode(RetCode::kOK); } - delete[] reinterpret_cast(context->buf); break; } case LIBAIO_OP::LIBAIO_OP_FLUSH: @@ -141,19 +138,21 @@ void NebdFileServiceImpl::Write( aioContext->cb = NebdFileServiceCallback; brpc::Controller* cntl = dynamic_cast(cntl_base); - aioContext->buf = new char[aioContext->size]; - size_t copySize = - cntl->request_attachment().copy_to(aioContext->buf, aioContext->size); + + std::unique_ptr buf(new butil::IOBuf()); + *buf = cntl->request_attachment(); + + size_t copySize = buf->size(); if (copySize != aioContext->size) { LOG(ERROR) << "Copy attachment failed. " << "fd: " << request->fd() << ", offset: " << request->offset() << ", size: " << request->size() << ", copy size: " << copySize; - delete[] reinterpret_cast(aioContext->buf); return; } + aioContext->buf = buf.get(); aioContext->response = response; aioContext->done = done; aioContext->cntl = cntl_base; @@ -164,8 +163,8 @@ void NebdFileServiceImpl::Write( << ", offset: " << request->offset() << ", size: " << request->size() << ", return code: " << rc; - delete[] reinterpret_cast(aioContext->buf); } else { + buf.release(); doneGuard.release(); } } @@ -184,7 +183,10 @@ void NebdFileServiceImpl::Read( aioContext->size = request->size(); aioContext->op = LIBAIO_OP::LIBAIO_OP_READ; aioContext->cb = NebdFileServiceCallback; - aioContext->buf = new char[request->size()]; + + std::unique_ptr buf(new butil::IOBuf()); + aioContext->buf = buf.get(); + aioContext->response = response; aioContext->done = done; aioContext->cntl = cntl_base; @@ -195,8 +197,8 @@ void NebdFileServiceImpl::Read( << ", offset: " << request->offset() << ", size: " << request->size() << ", return code: " << rc; - delete[] reinterpret_cast(aioContext->buf); } else { + buf.release(); doneGuard.release(); } } diff --git a/nebd/src/part2/request_executor_curve.cpp b/nebd/src/part2/request_executor_curve.cpp index 85121bd94b..7c3a08082c 100644 --- a/nebd/src/part2/request_executor_curve.cpp +++ b/nebd/src/part2/request_executor_curve.cpp @@ -176,7 +176,8 @@ int CurveRequestExecutor::AioRead( return -1; } - ret = client_->AioRead(curveFd, &curveCombineCtx->curveCtx); + ret = client_->AioRead(curveFd, &curveCombineCtx->curveCtx, + curve::client::UserDataType::IOBuffer); if (ret != LIBCURVE_ERROR::OK) { delete curveCombineCtx; return -1; @@ -200,7 +201,8 @@ int CurveRequestExecutor::AioWrite( return -1; } - ret = client_->AioWrite(curveFd, &curveCombineCtx->curveCtx); + ret = 
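// The Write()/Read() services above share one ownership-handoff idiom: the
// heap-allocated IOBuf sits in a std::unique_ptr while submission can still
// fail synchronously, and release() runs only on success, after which the
// completion callback (NebdFileServiceCallback's iobufGuard) deletes it
// exactly once. The same idiom in isolation; Submit, OnComplete and IoCtx
// are hypothetical stand-ins, not names from this patch:

#include <memory>
#include <butil/iobuf.h>

struct IoCtx { butil::IOBuf* buf = nullptr; };

int Submit(IoCtx* ctx);  // assumed: negative return means synchronous failure

void OnComplete(IoCtx* ctx) {
    std::unique_ptr<butil::IOBuf> guard(ctx->buf);  // freed exactly once here
    // ... consume *ctx->buf ...
}

int StartIo(IoCtx* ctx) {
    std::unique_ptr<butil::IOBuf> buf(new butil::IOBuf());
    ctx->buf = buf.get();
    int rc = Submit(ctx);
    if (rc < 0) {
        return rc;  // still owned by `buf`: freed on return, no leak
    }
    buf.release();  // ownership handed to OnComplete()
    return 0;
}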
client_->AioWrite(curveFd, &curveCombineCtx->curveCtx, + curve::client::UserDataType::IOBuffer); if (ret != LIBCURVE_ERROR::OK) { delete curveCombineCtx; return -1; diff --git a/nebd/test/part1/nebd_client_unittest.cpp b/nebd/test/part1/nebd_client_unittest.cpp index aed6734213..bf06340f3b 100644 --- a/nebd/test/part1/nebd_client_unittest.cpp +++ b/nebd/test/part1/nebd_client_unittest.cpp @@ -291,7 +291,7 @@ TEST_F(NebdFileClientTest, CommonTest) { ASSERT_EQ(0, Extend4Nebd(fd, kFileSize)); ASSERT_EQ(kFileSize, GetFileSize4Nebd(fd)); - ASSERT_EQ(-1, GetInfo4Nebd(1)); + ASSERT_EQ(kFileSize, GetInfo4Nebd(fd)); ASSERT_EQ(0, InvalidCache4Nebd(fd)); char buffer[kBufSize]; diff --git a/nebd/test/part2/file_service_unittest.cpp b/nebd/test/part2/file_service_unittest.cpp index 00f7415794..a720a50b02 100644 --- a/nebd/test/part2/file_service_unittest.cpp +++ b/nebd/test/part2/file_service_unittest.cpp @@ -124,7 +124,13 @@ TEST_F(FileServiceTest, WriteTest) { EXPECT_CALL(*fileManager_, AioWrite(fd, NotNull())) .WillOnce(DoAll(SaveArg<1>(&aioCtx), Return(0))); fileService_->Write(&cntl, &request, &response, &done); - ASSERT_EQ(0, strncmp((char*)aioCtx->buf, buf, kSize)); // NOLINT + + butil::IOBuf data; + data.append(buf, kSize); + ASSERT_EQ( + *reinterpret_cast(aioCtx->buf), + data); + ASSERT_FALSE(done.IsRunned()); // write failed @@ -342,7 +348,7 @@ TEST_F(FileServiceTest, CallbackTest) { context->offset = 0; context->size = 4096; context->done = &done; - context->buf = new char[4096]; + context->buf = new butil::IOBuf(); context->ret = 0; NebdFileServiceCallback(context); ASSERT_TRUE(done.IsRunned()); @@ -360,7 +366,7 @@ TEST_F(FileServiceTest, CallbackTest) { context->offset = 0; context->size = 4096; context->done = &done; - context->buf = new char[4096]; + context->buf = new butil::IOBuf(); context->ret = -1; NebdFileServiceCallback(context); ASSERT_TRUE(done.IsRunned()); @@ -378,7 +384,7 @@ TEST_F(FileServiceTest, CallbackTest) { context->offset = 0; context->size = 4096; context->done = &done; - context->buf = new char[4096]; + context->buf = new butil::IOBuf(); context->ret = 0; NebdFileServiceCallback(context); ASSERT_TRUE(done.IsRunned()); @@ -396,7 +402,7 @@ TEST_F(FileServiceTest, CallbackTest) { context->offset = 0; context->size = 4096; context->done = &done; - context->buf = new char[4096]; + context->buf = new butil::IOBuf(); context->ret = -1; NebdFileServiceCallback(context); ASSERT_TRUE(done.IsRunned()); diff --git a/nebd/test/part2/mock_curve_client.h b/nebd/test/part2/mock_curve_client.h index 955fabb48c..ce12097c98 100644 --- a/nebd/test/part2/mock_curve_client.h +++ b/nebd/test/part2/mock_curve_client.h @@ -41,8 +41,10 @@ class MockCurveClient : public ::curve::client::CurveClient { MOCK_METHOD1(Close, int(int)); MOCK_METHOD2(Extend, int(const std::string&, int64_t)); MOCK_METHOD1(StatFile, int64_t(const std::string&)); - MOCK_METHOD2(AioRead, int(int, CurveAioContext*)); - MOCK_METHOD2(AioWrite, int(int, CurveAioContext*)); + MOCK_METHOD3(AioRead, + int(int, CurveAioContext*, curve::client::UserDataType)); + MOCK_METHOD3(AioWrite, + int(int, CurveAioContext*, curve::client::UserDataType)); }; } // namespace server diff --git a/nebd/test/part2/test_request_executor_curve.cpp b/nebd/test/part2/test_request_executor_curve.cpp index 5bbd8d9520..0f85914f65 100644 --- a/nebd/test/part2/test_request_executor_curve.cpp +++ b/nebd/test/part2/test_request_executor_curve.cpp @@ -267,7 +267,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { // 1. 
nebdFileIns不是CurveFileInstance类型, 异步读失败 { auto nebdFileIns = new NebdFileInstance(); - EXPECT_CALL(*curveClient_, AioRead(_, _)).Times(0); + EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioRead(nebdFileIns, &aiotcx)); } @@ -275,7 +275,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; - EXPECT_CALL(*curveClient_, AioRead(_, _)).Times(0); + EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } @@ -288,7 +288,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { aiotcx.op = LIBAIO_OP::LIBAIO_OP_READ; curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; - EXPECT_CALL(*curveClient_, AioRead(1, _)) + EXPECT_CALL(*curveClient_, AioRead(1, _, _)) .WillOnce(Return(LIBCURVE_ERROR::FAILED)); ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } @@ -299,7 +299,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; - EXPECT_CALL(*curveClient_, AioRead(1, _)) + EXPECT_CALL(*curveClient_, AioRead(1, _, _)) .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.AioRead(curveFileIns, &aiotcx)); @@ -316,7 +316,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { // 1. nebdFileIns不是CurveFileInstance类型, 异步写失败 { auto nebdFileIns = new NebdFileInstance(); - EXPECT_CALL(*curveClient_, AioWrite(_, _)).Times(0); + EXPECT_CALL(*curveClient_, AioWrite(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioWrite(nebdFileIns, &aiotcx)); } @@ -324,7 +324,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; - EXPECT_CALL(*curveClient_, AioWrite(_, _)).Times(0); + EXPECT_CALL(*curveClient_, AioWrite(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } @@ -337,7 +337,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { aiotcx.op = LIBAIO_OP::LIBAIO_OP_READ; curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; - EXPECT_CALL(*curveClient_, AioWrite(1, _)) + EXPECT_CALL(*curveClient_, AioWrite(1, _, _)) .WillOnce(Return(LIBCURVE_ERROR::FAILED)); ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } @@ -348,7 +348,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { curveFileIns->fd = 1; curveFileIns->fileName = curveFilename; CurveAioContext* curveCtx; - EXPECT_CALL(*curveClient_, AioWrite(1, _)) + EXPECT_CALL(*curveClient_, AioWrite(1, _, _)) .WillOnce(DoAll(SaveArg<1>(&curveCtx), Return(LIBCURVE_ERROR::OK))); ASSERT_EQ(0, executor.AioWrite(curveFileIns, &aiotcx)); diff --git a/src/client/chunk_closure.cpp b/src/client/chunk_closure.cpp index 337ef3461c..a52073263c 100644 --- a/src/client/chunk_closure.cpp +++ b/src/client/chunk_closure.cpp @@ -503,7 +503,7 @@ void ClientClosure::OnInvalidRequest() { void WriteChunkClosure::SendRetryRequest() { client_->WriteChunk(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->writeBuffer_, + reqCtx_->writeData_, reqCtx_->offset_, reqCtx_->rawlength_, reqCtx_->sourceInfo_, @@ -531,9 +531,7 @@ void ReadChunkClosure::SendRetryRequest() { void ReadChunkClosure::OnSuccess() { ClientClosure::OnSuccess(); - cntl_->response_attachment().copy_to( - reqCtx_->readBuffer_, - cntl_->response_attachment().size()); + reqCtx_->readData_ = cntl_->response_attachment(); metaCache_->UpdateAppliedIndex( reqCtx_->idinfo_.lpid_, @@ -545,7 +543,7 @@ void ReadChunkClosure::OnChunkNotExist() { 
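// Two IOBuf properties carry the read closures above and below: assignment
// (readData_ = cntl_->response_attachment()) only adds references to the
// source blocks, and resize(n, c) appends n bytes of c, which is how
// OnChunkNotExist below returns zeroes without a memset. A self-contained
// illustration of both:

#include <butil/iobuf.h>
#include <cassert>

void IOBufSemantics() {
    butil::IOBuf a;
    a.append("0123456789", 10);
    butil::IOBuf b = a;       // O(1): b references a's blocks, no byte copy
    assert(b.size() == 10);

    butil::IOBuf zeroes;
    zeroes.resize(4096, 0);   // appends 4096 zero bytes
    assert(zeroes.size() == 4096);
}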
ClientClosure::OnChunkNotExist(); reqDone_->SetFailed(0); - memset(reqCtx_->readBuffer_, 0, reqCtx_->rawlength_); + reqCtx_->readData_.resize(reqCtx_->rawlength_, 0); metaCache_->UpdateAppliedIndex(chunkIdInfo_.lpid_, chunkIdInfo_.cpid_, response_->appliedindex()); } @@ -560,9 +558,7 @@ void ReadChunkSnapClosure::SendRetryRequest() { void ReadChunkSnapClosure::OnSuccess() { ClientClosure::OnSuccess(); - cntl_->response_attachment().copy_to( - reqCtx_->readBuffer_, - cntl_->response_attachment().size()); + reqCtx_->readData_ = cntl_->response_attachment(); } void DeleteChunkSnapClosure::SendRetryRequest() { diff --git a/src/client/client_common.cpp b/src/client/client_common.cpp deleted file mode 100644 index 888abc3bf1..0000000000 --- a/src/client/client_common.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2020 NetEase Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Project: curve - * Create Data: 2019年11月13日 - * Author: wuhanqing - */ - -#include "src/client/client_common.h" - -namespace curve { -namespace client { - -const char* OpTypeToString(OpType optype) { - switch (optype) { - case OpType::READ: - return "Read"; - case OpType::WRITE: - return "Write"; - case OpType::READ_SNAP: - return "ReadSnapshot"; - case OpType::DELETE_SNAP: - return "DeleteSnapshot"; - case OpType::RECOVER_CHUNK: - return "RecoverChunk"; - case OpType::GET_CHUNK_INFO: - return "GetChunkInfo"; - case OpType::UNKNOWN: - default: - return "Unknown"; - } -} - -} // namespace client -} // namespace curve - diff --git a/src/client/client_common.h b/src/client/client_common.h index 8167d7dcab..d5a0802a34 100644 --- a/src/client/client_common.h +++ b/src/client/client_common.h @@ -217,7 +217,26 @@ struct ChunkServerAddr { } }; -const char* OpTypeToString(OpType optype); +inline const char* OpTypeToString(OpType optype) { + switch (optype) { + case OpType::READ: + return "Read"; + case OpType::WRITE: + return "Write"; + case OpType::READ_SNAP: + return "ReadSnapshot"; + case OpType::DELETE_SNAP: + return "DeleteSnapshot"; + case OpType::RECOVER_CHUNK: + return "RecoverChunk"; + case OpType::GET_CHUNK_INFO: + return "GetChunkInfo"; + case OpType::UNKNOWN: + default: + return "Unknown"; + } +} + struct ClusterContext { std::string clusterId; }; @@ -273,6 +292,8 @@ class ClientDummyServerInfo { bool register_ = false; }; +inline void TrivialDeleter(void* ptr) {} + } // namespace client } // namespace curve diff --git a/src/client/copyset_client.cpp b/src/client/copyset_client.cpp index 488d17db63..86d662479d 100644 --- a/src/client/copyset_client.cpp +++ b/src/client/copyset_client.cpp @@ -132,7 +132,8 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, } int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, uint64_t sn, - const char* buf, off_t offset, size_t length, + const butil::IOBuf& data, + off_t offset, size_t length, const RequestSourceInfo& sourceInfo, google::protobuf::Closure* done) { std::shared_ptr senderPtr = nullptr; @@ -172,7 +173,7 @@ int 
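// TrivialDeleter above is what lets an IOBuf wrap caller-owned memory:
// append_user_data() hands the block to the IOBuf and invokes the deleter
// when the last reference drops, so a no-op deleter turns the IOBuf into a
// pure zero-copy view. A sketch, assuming the caller keeps `buf` alive for
// the duration of the IO (the synchronous write path guarantees that):

#include <butil/iobuf.h>

inline void TrivialDeleter(void* ptr) {}

butil::IOBuf WrapCallerBuffer(const char* buf, size_t len) {
    butil::IOBuf data;
    // No copy: the IOBuf references [buf, buf + len) and never frees it.
    data.append_user_data(const_cast<char*>(buf), len, TrivialDeleter);
    return data;
}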
CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, uint64_t sn, auto task = [&](Closure* done, std::shared_ptr senderPtr) { WriteChunkClosure* writeDone = new WriteChunkClosure(this, done); - senderPtr->WriteChunk(idinfo, sn, buf, offset, length, sourceInfo, + senderPtr->WriteChunk(idinfo, sn, data, offset, length, sourceInfo, writeDone); }; diff --git a/src/client/copyset_client.h b/src/client/copyset_client.h index c8511c9a76..83a6adee08 100644 --- a/src/client/copyset_client.h +++ b/src/client/copyset_client.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -101,7 +102,7 @@ class CopysetClient : public Uncopyable { * 写Chunk * @param idinfo为chunk相关的id信息 * @param sn:文件版本号 - * @param buf:要写入的数据 + * @param writeData:要写入的数据 *@param offset:写的偏移 * @param length:写的长度 * @param sourceInfo chunk克隆源信息 @@ -109,7 +110,7 @@ class CopysetClient : public Uncopyable { */ int WriteChunk(const ChunkIDInfo& idinfo, uint64_t sn, - const char *buf, + const butil::IOBuf& writeData, off_t offset, size_t length, const RequestSourceInfo& sourceInfo, diff --git a/src/client/file_instance.cpp b/src/client/file_instance.cpp index ebbadc253c..973dab7d14 100644 --- a/src/client/file_instance.cpp +++ b/src/client/file_instance.cpp @@ -114,16 +114,16 @@ int FileInstance::Write(const char* buf, off_t offset, size_t len) { return iomanager4file_.Write(buf, offset, len, mdsclient_); } -int FileInstance::AioRead(CurveAioContext* aioctx) { - return iomanager4file_.AioRead(aioctx, mdsclient_); +int FileInstance::AioRead(CurveAioContext* aioctx, UserDataType dataType) { + return iomanager4file_.AioRead(aioctx, mdsclient_, dataType); } -int FileInstance::AioWrite(CurveAioContext* aioctx) { +int FileInstance::AioWrite(CurveAioContext* aioctx, UserDataType dataType) { if (readonly_) { DVLOG(9) << "open with read only, do not support write!"; return -1; } - return iomanager4file_.AioWrite(aioctx, mdsclient_); + return iomanager4file_.AioWrite(aioctx, mdsclient_, dataType); } // 两种场景会造成在Open的时候返回LIBCURVE_ERROR::FILE_OCCUPIED diff --git a/src/client/file_instance.h b/src/client/file_instance.h index 2e3db10f1c..9c0ac42b45 100644 --- a/src/client/file_instance.h +++ b/src/client/file_instance.h @@ -103,15 +103,17 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { /** * 异步模式读 * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * @param: dataType type of user buffer * @return: 0为成功,小于0为失败 */ - int AioRead(CurveAioContext* aioctx); + int AioRead(CurveAioContext* aioctx, UserDataType dataType); /** * 异步模式写 * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * @param: dataType type of user buffer * @return: 0为成功,小于0为失败 */ - int AioWrite(CurveAioContext* aioctx); + int AioWrite(CurveAioContext* aioctx, UserDataType dataType); int Close(); diff --git a/src/client/io_condition_varaiable.h b/src/client/io_condition_varaiable.h index 354184a74e..ce1b4eaed1 100644 --- a/src/client/io_condition_varaiable.h +++ b/src/client/io_condition_varaiable.h @@ -31,10 +31,8 @@ namespace client { // IOConditionVariable是用户同步IO场景下IO等待条件变量 class IOConditionVariable { public: - IOConditionVariable() { - ret = -1; - done_ = false; - } + IOConditionVariable() : retCode_(-1), done_(false), mtx_(), cv_() {} + ~IOConditionVariable() = default; /** @@ -44,7 +42,7 @@ class IOConditionVariable { */ void Complete(int retcode) { std::unique_lock lk(mtx_); - ret = retcode; + retCode_ = retcode; done_ = true; cv_.notify_one(); } @@ -52,16 +50,16 @@ class IOConditionVariable { /** * 是用户IO需要等待时候调用的函数,这个函数会在Complete被调用的时候返回 */ - int Wait() { + int Wait() { 
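// The Wait()/Complete() pair being defined here is the bridge that keeps
// the public synchronous Read()/Write() API on top of a fully asynchronous
// pipeline: the caller parks in Wait() while the last RPC callback invokes
// Complete(ret). A usage sketch; DispatchAsync is an assumed stand-in for
// the real scheduler, not part of this patch:

#include <functional>

void DispatchAsync(std::function<void(int)> onDone);  // assumed submitter

int SyncOverAsync(IOConditionVariable* iocv) {
    DispatchAsync([iocv](int ret) { iocv->Complete(ret); });  // async side
    return iocv->Wait();  // blocks until Complete(), returns its retcode
}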
std::unique_lock<std::mutex> lk(mtx_);
-        cv_.wait(lk, [&]()->bool {return done_;});
+        cv_.wait(lk, [&]() { return done_; });
         done_ = false;
-        return ret;
+        return retCode_;
     }

 private:
    // 当前IO的返回值
-    int ret;
+    int retCode_;

    // 当前IO是否完成
    bool done_;

diff --git a/src/client/io_tracker.cpp b/src/client/io_tracker.cpp
index 9e18a73ac9..6ec5237724 100644
--- a/src/client/io_tracker.cpp
+++ b/src/client/io_tracker.cpp
@@ -59,65 +59,119 @@ IOTracker::IOTracker(IOManager* iomanager,
     opStartTimePoint_ = curve::common::TimeUtility::GetTimeofDayUs();
 }

-void IOTracker::StartRead(CurveAioContext* aioctx, char* buf,
-    off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fi) {
-    data_ = buf;
+void IOTracker::StartRead(void* buf, off_t offset, size_t length,
+                          MDSClient* mdsclient, const FInfo_t* fileInfo) {
+    data_ = buf;
     offset_ = offset;
     length_ = length;
-    aioctx_ = aioctx;
-    type_ = OpType::READ;
+    type_ = OpType::READ;
+
+    DVLOG(9) << "read op, offset = " << offset << ", length = " << length;
+
+    DoRead(mdsclient, fileInfo);
+}

-    DVLOG(9) << "read op, offset = " << offset
-             << ", length = " << length;
+void IOTracker::StartAioRead(CurveAioContext* ctx, MDSClient* mdsclient,
+                             const FInfo_t* fileInfo) {
+    aioctx_ = ctx;
+    data_ = ctx->buf;
+    offset_ = ctx->offset;
+    length_ = ctx->length;
+    type_ = OpType::READ;

-    int ret = Splitor::IO2ChunkRequests(this, mc_, &reqlist_, data_,
-                                offset_, length_, mdsclient, fi);
+    DVLOG(9) << "aioread op, offset = " << ctx->offset
+             << ", length = " << ctx->length;
+
+    DoRead(mdsclient, fileInfo);
+}
+
+void IOTracker::DoRead(MDSClient* mdsclient, const FInfo_t* fileInfo) {
+    int ret = Splitor::IO2ChunkRequests(this, mc_, &reqlist_, nullptr, offset_,
+                                        length_, mdsclient, fileInfo);
     if (ret == 0) {
+        PrepareReadIOBuffers(reqlist_.size());
+        uint32_t subIoIndex = 0;
+
         reqcount_.store(reqlist_.size(), std::memory_order_release);
         std::for_each(reqlist_.begin(), reqlist_.end(), [&](RequestContext* r) {
             r->done_->SetFileMetric(fileMetric_);
             r->done_->SetIOManager(iomanager_);
+            r->subIoIndex_ = subIoIndex++;
         });
+
         ret = scheduler_->ScheduleRequest(reqlist_);
     } else {
         LOG(ERROR) << "splitor read io failed, "
-                   << "offset = " << offset_
-                   << ", length = " << length_;
+                   << "offset = " << offset_ << ", length = " << length_;
     }

     if (ret == -1) {
-        LOG(ERROR) << "split or schedule failed, return and recyle resource!";
+        LOG(ERROR) << "split or schedule failed, return and recycle resource!";
         ReturnOnFail();
     }
 }

-void IOTracker::StartWrite(CurveAioContext* aioctx, const char* buf,
-    off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fi) {
-    data_ = buf;
+void IOTracker::StartWrite(const void* buf, off_t offset, size_t length,
+                           MDSClient* mdsclient, const FInfo_t* fileInfo) {
+    data_ = const_cast<void*>(buf);
     offset_ = offset;
     length_ = length;
-    aioctx_ = aioctx;
-    type_ = OpType::WRITE;
+    type_ = OpType::WRITE;
+
+    DVLOG(9) << "write op, offset = " << offset << ", length = " << length;
+
+    DoWrite(mdsclient, fileInfo);
+}
+
+void IOTracker::StartAioWrite(CurveAioContext* ctx, MDSClient* mdsclient,
+                              const FInfo_t* fileInfo) {
+    aioctx_ = ctx;
+    data_ = ctx->buf;
+    offset_ = ctx->offset;
+    length_ = ctx->length;
+    type_ = OpType::WRITE;
+
+    DVLOG(9) << "aiowrite op, offset = " << ctx->offset
+             << ", length = " << ctx->length;
+
+    DoWrite(mdsclient, fileInfo);
+}
+
+void IOTracker::DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo) {
+    if (nullptr == data_) {
+        ReturnOnFail();
+        return;
+    }
+
+    switch (userDataType_) {
+    case UserDataType::RawBuffer:
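// Note on the two arms of this switch: RawBuffer wraps the user's pointer
// via append_user_data + TrivialDeleter, making writeData_ a zero-copy view
// whose backing memory the caller must keep alive until the IO completes;
// IOBuffer takes a shallow IOBuf copy, which only adds references to the
// caller's existing blocks. Neither arm copies payload bytes.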
writeData_.append_user_data(data_, length_, + TrivialDeleter); + break; + case UserDataType::IOBuffer: + writeData_ = *reinterpret_cast(data_); + break; + } - DVLOG(9) << "write op, offset = " << offset - << ", length = " << length; - int ret = Splitor::IO2ChunkRequests(this, mc_, &reqlist_, data_, offset_, - length_, mdsclient, fi); + int ret = Splitor::IO2ChunkRequests(this, mc_, &reqlist_, &writeData_, + offset_, length_, mdsclient, fileInfo); if (ret == 0) { + uint32_t subIoIndex = 0; + reqcount_.store(reqlist_.size(), std::memory_order_release); std::for_each(reqlist_.begin(), reqlist_.end(), [&](RequestContext* r) { r->done_->SetFileMetric(fileMetric_); r->done_->SetIOManager(iomanager_); + r->subIoIndex_ = subIoIndex++; }); ret = scheduler_->ScheduleRequest(reqlist_); } else { LOG(ERROR) << "splitor write io failed, " - << "offset = " << offset_ - << ", length = " << length_; + << "offset = " << offset_ << ", length = " << length_; } if (ret == -1) { - LOG(ERROR) << "split or schedule failed, return and recyle resource!"; + LOG(ERROR) << "split or schedule failed, return and recycle resource!"; ReturnOnFail(); } } @@ -133,16 +187,23 @@ void IOTracker::ReadSnapChunk(const ChunkIDInfo &cinfo, int ret = -1; do { - ret = Splitor::SingleChunkIO2ChunkRequests(this, mc_, &reqlist_, cinfo, - data_, offset_, length_, seq); + ret = Splitor::SingleChunkIO2ChunkRequests( + this, mc_, &reqlist_, cinfo, nullptr, offset_, length_, seq); if (ret == 0) { + PrepareReadIOBuffers(reqlist_.size()); + uint32_t subIoIndex = 0; reqcount_.store(reqlist_.size(), std::memory_order_release); + + for (auto& req : reqlist_) { + req->subIoIndex_ = subIoIndex++; + } + ret = scheduler_->ScheduleRequest(reqlist_); } } while (false); if (ret == -1) { - LOG(ERROR) << "split or schedule failed, return and recyle resource!"; + LOG(ERROR) << "split or schedule failed, return and recycle resource!"; ReturnOnFail(); } } @@ -169,7 +230,7 @@ void IOTracker::DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, if (ret == -1) { LOG(ERROR) << "DeleteSnapChunkOrCorrectSn request schedule failed," - << "return and recyle resource!"; + << "return and recycle resource!"; ReturnOnFail(); } } @@ -196,7 +257,7 @@ void IOTracker::GetChunkInfo(const ChunkIDInfo &cinfo, if (ret == -1) { LOG(ERROR) << "GetChunkInfo request schedule failed," - << " return and recyle resource!"; + << " return and recycle resource!"; ReturnOnFail(); } } @@ -229,7 +290,7 @@ void IOTracker::CreateCloneChunk(const std::string& location, if (ret == -1) { LOG(ERROR) << "CreateCloneChunk request schedule failed," - << "return and recyle resource!"; + << "return and recycle resource!"; ReturnOnFail(); } } @@ -258,7 +319,7 @@ void IOTracker::RecoverChunk(const ChunkIDInfo& cinfo, uint64_t offset, if (ret == -1) { LOG(ERROR) << "RecoverChunk request schedule failed," - << " return and recyle resource!"; + << " return and recycle resource!"; ReturnOnFail(); } } @@ -276,6 +337,11 @@ void IOTracker::HandleResponse(RequestContext* reqctx) { &errcode_); } + // copy read data + if (OpType::READ == type_ || OpType::READ_SNAP == type_) { + SetReadData(reqctx->subIoIndex_, reqctx->readData_); + } + if (1 == reqcount_.fetch_sub(1, std::memory_order_acq_rel)) { Done(); } @@ -290,6 +356,40 @@ void IOTracker::Done() { uint64_t duration = TimeUtility::GetTimeofDayUs() - opStartTimePoint_; MetricHelper::UserLatencyRecord(fileMetric_, duration, type_); MetricHelper::IncremUserQPSCount(fileMetric_, length_, type_); + + // copy read data to user buffer + if (OpType::READ == type_ || 
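// Read reassembly happens in two steps: HandleResponse() above stores each
// sub-request's attachment into readDatas_[subIoIndex_] (block references
// only), and the last response to arrive runs Done(), which concatenates
// the slices in index order. The splitor emits sub-requests in ascending
// offset order and subIoIndex_ is assigned sequentially, so index order
// equals offset order, and the concatenation rebuilds the original byte
// range even though responses complete out of order.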
OpType::READ_SNAP == type_) { + butil::IOBuf readData; + for (const auto& buf : readDatas_) { + readData.append(buf); + } + + switch (userDataType_) { + case UserDataType::RawBuffer: { + size_t nc = readData.copy_to(data_, readData.size()); + if (nc != length_) { + errcode_ = LIBCURVE_ERROR::FAILED; + } + break; + } + case UserDataType::IOBuffer: { + butil::IOBuf* userData = + reinterpret_cast(data_); + *userData = readData; + if (userData->size() != length_) { + errcode_ = LIBCURVE_ERROR::FAILED; + } + break; + } + } + + if (errcode_ != LIBCURVE_ERROR::OK) { + LOG(ERROR) << "IO Error, copy data to read buffer failed, " + << ", filename: " << fileMetric_->filename + << ", offset: " << offset_ + << ", length: " << length_; + } + } } else { MetricHelper::IncremUserEPSCount(fileMetric_, type_); if (type_ == OpType::READ || type_ == OpType::WRITE) { diff --git a/src/client/io_tracker.h b/src/client/io_tracker.h index 4ee854122a..c48101b841 100644 --- a/src/client/io_tracker.h +++ b/src/client/io_tracker.h @@ -22,10 +22,12 @@ #ifndef SRC_CLIENT_IO_TRACKER_H_ #define SRC_CLIENT_IO_TRACKER_H_ +#include + #include -#include #include #include +#include #include "src/client/metacache.h" #include "src/client/mds_client.h" @@ -60,28 +62,44 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { ~IOTracker() = default; /** - * startread和startwrite将上层的同步和异步读写接口统一了 - * CurveAioContext传入的为空值的时候,代表这个读写是同步, - * 否则是异步的,MDSClient和FInfo_t透传给splitor。 - * @param: aioctx异步io上下文,为空的时候代表同步IO - * @param: buf是读写缓冲区 - * @param: offset是读写偏移 - * @param: length是读写长度 - * @param: mdsclient透传给splitor,与mds通信 - * @param: fi是当前io对应文件的基本信息 + * @brief StartRead同步读 + * @param buf 读缓冲区 + * @param offset 读偏移 + * @param length 读长度 + * @param mdsclient 透传给splitor,与mds通信 + * @param fileInfo 当前io对应文件的基本信息 + */ + void StartRead(void* buf, off_t offset, size_t length, MDSClient* mdsclient, + const FInfo_t* fileInfo); + + /** + * @brief StartWrite同步写 + * @param buf 写缓冲区 + * @param offset 写偏移 + * @param length 写长度 + * @param mdsclient 透传给splitor,与mds通信 + * @param fileInfo 当前io对应文件的基本信息 */ - void StartRead(CurveAioContext* aioctx, - char* buf, - off_t offset, - size_t length, - MDSClient* mdsclient, - const FInfo_t* fi); - void StartWrite(CurveAioContext* aioctx, - const char* buf, - off_t offset, - size_t length, - MDSClient* mdsclient, - const FInfo_t* fi); + void StartWrite(const void* buf, off_t offset, size_t length, + MDSClient* mdsclient, const FInfo_t* fileInfo); + + /** + * @brief start an async read operation + * @param ctx async read context + * @param mdsclient used to communicate with MDS + * @param fileInfo current file info + */ + void StartAioRead(CurveAioContext* ctx, MDSClient* mdsclient, + const FInfo_t* fileInfo); + + /** + * @brief start an async write operation + * @param ctx async write context + * @param mdsclient used to communicate with MDS + * @param fileInfo current file info + */ + void StartAioWrite(CurveAioContext* ctx, MDSClient* mdsclient, + const FInfo_t* fileInfo); /** * chunk相关接口是提供给snapshot使用的,上层的snapshot和file * 接口是分开的,在IOTracker这里会将其统一,这样对下层来说不用 @@ -168,9 +186,26 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { /** * 获取当前tracker id信息 */ - uint64_t GetID() { + uint64_t GetID() const { return id_; - } + } + + // set user data type + void SetUserDataType(const UserDataType dataType) { + userDataType_ = dataType; + } + + /** + * @brief prepare space to store read data + * @param subIoCount #space to store read data + */ + void PrepareReadIOBuffers(const uint32_t subIoCount) { + readDatas_.resize(subIoCount); + } + + 
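// PrepareReadIOBuffers() sizes readDatas_ up front, so the RPC callbacks
// that later call SetReadData() write to disjoint, pre-existing slots: no
// lock is needed because the vector is never resized while sub-IOs are in
// flight, and the release store / acq_rel fetch_sub pair on reqcount_
// orders those writes before Done() reads the slices back.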
void SetReadData(const uint32_t subIoIndex, const butil::IOBuf& data) { + readDatas_[subIoIndex] = data; + } private: /** @@ -209,6 +244,12 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { */ RequestContext* GetInitedRequestContext() const; + // perform read operation + void DoRead(MDSClient* mdsclient, const FInfo_t* fileInfo); + + // perform write operation + void DoWrite(MDSClient* mdsclient, const FInfo_t* fileInfo); + private: // io 类型 OpType type_; @@ -216,7 +257,18 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { // 当前IO的数据内容,data是读写数据的buffer off_t offset_; uint64_t length_; - mutable const char* data_; + + // user data pointer + void* data_; + + // user data type + UserDataType userDataType_; + + // save write data + butil::IOBuf writeData_; + + // save read data + std::vector readDatas_; // 当用户下发的是同步IO的时候,其需要在上层进行等待,因为client的 // IO发送流程全部是异步的,因此这里需要用条件变量等待,待异步IO返回 @@ -234,7 +286,7 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { std::atomic reqcount_; // 大IO被拆分成多个request,这些request放在reqlist中国保存 - std::list reqlist_; + std::vector reqlist_; // scheduler用来将用户线程与client自己的线程切分 // 大IO被切分之后,将切分的reqlist传给scheduler向下发送 diff --git a/src/client/iomanager4chunk.cpp b/src/client/iomanager4chunk.cpp index ce1edbc1ad..744c009381 100644 --- a/src/client/iomanager4chunk.cpp +++ b/src/client/iomanager4chunk.cpp @@ -53,11 +53,14 @@ void IOManager4Chunk::UnInitialize() { scheduler_ = nullptr; } -int IOManager4Chunk::ReadSnapChunk(const ChunkIDInfo &chunkidinfo, - uint64_t seq, uint64_t offset, uint64_t len, - char *buf, SnapCloneClosure* scc) { - +int IOManager4Chunk::ReadSnapChunk(const ChunkIDInfo& chunkidinfo, + uint64_t seq, + uint64_t offset, + uint64_t len, + char* buf, + SnapCloneClosure* scc) { IOTracker* temp = new IOTracker(this, &mc_, scheduler_); + temp->SetUserDataType(UserDataType::RawBuffer); temp->ReadSnapChunk(chunkidinfo, seq, offset, len, buf, scc); return 0; } diff --git a/src/client/iomanager4file.cpp b/src/client/iomanager4file.cpp index e7b02ebe73..41785d2d2a 100644 --- a/src/client/iomanager4file.cpp +++ b/src/client/iomanager4file.cpp @@ -130,28 +130,42 @@ int IOManager4File::Read(char* buf, off_t offset, MetricHelper::IncremUserRPSCount(fileMetric_, OpType::READ); FlightIOGuard guard(this); + butil::IOBuf data; + IOTracker temp(this, &mc_, scheduler_, fileMetric_); - temp.StartRead(nullptr, buf, offset, length, mdsclient, - this->GetFileInfo()); + temp.SetUserDataType(UserDataType::IOBuffer); + temp.StartRead(&data, offset, length, mdsclient, this->GetFileInfo()); int rc = temp.Wait(); - return rc; + + if (rc < 0) { + return rc; + } else { + size_t nc = data.copy_to(buf, length); + return nc == length ? 
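// On this synchronous path the one remaining copy is the copy_to() call
// above: the read lands in the local IOBuf as block references and is
// flattened into the caller's raw buffer in a single pass. copy_to()
// returns the number of bytes actually copied, so nc != length signals a
// short read and is mapped to -LIBCURVE_ERROR::FAILED in this return.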
rc : -LIBCURVE_ERROR::FAILED; + } } -int IOManager4File::Write(const char* buf, off_t offset, - size_t length, MDSClient* mdsclient) { +int IOManager4File::Write(const char* buf, + off_t offset, + size_t length, + MDSClient* mdsclient) { MetricHelper::IncremUserRPSCount(fileMetric_, OpType::WRITE); FlightIOGuard guard(this); + butil::IOBuf data; + data.append_user_data(const_cast(buf), length, TrivialDeleter); + IOTracker temp(this, &mc_, scheduler_, fileMetric_); - temp.StartWrite(nullptr, buf, offset, length, mdsclient, - this->GetFileInfo()); + temp.SetUserDataType(UserDataType::IOBuffer); + temp.StartWrite(&data, offset, length, mdsclient, this->GetFileInfo()); int rc = temp.Wait(); return rc; } -int IOManager4File::AioRead(CurveAioContext* ctx, MDSClient* mdsclient) { +int IOManager4File::AioRead(CurveAioContext* ctx, MDSClient* mdsclient, + UserDataType dataType) { MetricHelper::IncremUserRPSCount(fileMetric_, OpType::READ); IOTracker* temp = new (std::nothrow) IOTracker(this, &mc_, @@ -163,18 +177,18 @@ int IOManager4File::AioRead(CurveAioContext* ctx, MDSClient* mdsclient) { return LIBCURVE_ERROR::OK; } + temp->SetUserDataType(dataType); inflightCntl_.IncremInflightNum(); auto task = [this, ctx, mdsclient, temp]() { - temp->StartRead(ctx, static_cast(ctx->buf), - ctx->offset, ctx->length, mdsclient, - this->GetFileInfo()); + temp->StartAioRead(ctx, mdsclient, this->GetFileInfo()); }; taskPool_.Enqueue(task); return LIBCURVE_ERROR::OK; } -int IOManager4File::AioWrite(CurveAioContext* ctx, MDSClient* mdsclient) { +int IOManager4File::AioWrite(CurveAioContext* ctx, MDSClient* mdsclient, + UserDataType dataType) { MetricHelper::IncremUserRPSCount(fileMetric_, OpType::WRITE); IOTracker* temp = new (std::nothrow) IOTracker(this, &mc_, @@ -186,11 +200,10 @@ int IOManager4File::AioWrite(CurveAioContext* ctx, MDSClient* mdsclient) { return LIBCURVE_ERROR::OK; } + temp->SetUserDataType(dataType); inflightCntl_.IncremInflightNum(); auto task = [this, ctx, mdsclient, temp]() { - temp->StartWrite(ctx, static_cast(ctx->buf), - ctx->offset, ctx->length, mdsclient, - this->GetFileInfo()); + temp->StartAioWrite(ctx, mdsclient, this->GetFileInfo()); }; taskPool_.Enqueue(task); diff --git a/src/client/iomanager4file.h b/src/client/iomanager4file.h index c0824e5821..adb6d1b4a3 100644 --- a/src/client/iomanager4file.h +++ b/src/client/iomanager4file.h @@ -23,225 +23,230 @@ #ifndef SRC_CLIENT_IOMANAGER4FILE_H_ #define SRC_CLIENT_IOMANAGER4FILE_H_ -#include #include -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT +#include -#include "src/common/concurrent/concurrent.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/client/metacache.h" +#include "include/curve_compiler_specific.h" +#include "src/client/client_common.h" +#include "src/client/inflight_controller.h" #include "src/client/iomanager.h" #include "src/client/mds_client.h" -#include "src/client/client_common.h" +#include "src/client/metacache.h" #include "src/client/request_scheduler.h" -#include "include/curve_compiler_specific.h" -#include "src/client/inflight_controller.h" - -using curve::common::Atomic; +#include "src/common/concurrent/concurrent.h" +#include "src/common/concurrent/task_thread_pool.h" namespace curve { namespace client { class FlightIOGuard; class IOManager4File : public IOManager { public: - IOManager4File(); - ~IOManager4File() = default; - - /** - * 初始化函数 - * @param: filename为当前iomanager服务的文件名 - * @param: ioopt为当前iomanager的配置信息 - * @param: mdsclient向下透传给metacache - * 
@return: 成功true,失败false - */ - bool Initialize(const std::string& filename, - const IOOption_t& ioOpt, - MDSClient* mdsclient); - - /** - * 同步模式读 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @param: mdsclient透传给底层,在必要的时候与mds通信 - * @return: 成功返回读取真实长度,-1为失败 - */ - int Read(char* buf, off_t offset, size_t length, MDSClient* mdsclient); - /** - * 同步模式写 - * @param: mdsclient透传给底层,在必要的时候与mds通信 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回写入真实长度,-1为失败 - */ - int Write(const char* buf, off_t offset, size_t length, MDSClient* mdsclient); - /** - * 异步模式读 - * @param: mdsclient透传给底层,在必要的时候与mds通信 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 - * @return: 0为成功,小于0为失败 - */ - int AioRead(CurveAioContext* aioctx, - MDSClient* mdsclient); - /** - * 异步模式写 - * @param: mdsclient透传给底层,在必要的时候与mds通信 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 - * @return: 0为成功,小于0为失败 - */ - int AioWrite(CurveAioContext* aioctx, - MDSClient* mdsclient); - - /** - * 析构,回收资源 - */ - void UnInitialize(); - - /** - * @brief 获取rpc发送令牌 - */ - void GetInflightRpcToken() override; - - /** - * @brief 释放rpc发送令牌 - */ - void ReleaseInflightRpcToken() override; - - /** - * 获取metacache,测试代码使用 - */ - MetaCache* GetMetaCache() {return &mc_;} - /** - * 设置scahuler,测试代码使用 - */ - void SetRequestScheduler(RequestScheduler* scheduler) { - scheduler_ = scheduler; - } - - /** - * 获取metric信息,测试代码使用 - */ - FileMetric* GetMetric() { - return fileMetric_; - } - - /** - * 重新设置io配置信息,测试使用 - */ - void SetIOOpt(const IOOption_t& opt) { - ioopt_ = opt; - } - - /** - * 测试使用,获取request scheduler - */ - RequestScheduler* GetScheduler() { return scheduler_; } - - /** - * lease excutor在检查到版本更新的时候,需要通知iomanager更新文件版本信息 - * @param: fi为当前需要更新的文件信息 - */ - void UpdateFileInfo(const FInfo_t& fi); - - const FInfo* GetFileInfo() const { - return mc_.GetFileInfo(); - } - - /** - * 返回文件最新版本号 - */ - uint64_t GetLatestFileSn() const { - return mc_.GetLatestFileSn(); - } - - /** - * 更新文件最新版本号 - */ - void SetLatestFileSn(uint64_t newSn) { - mc_.SetLatestFileSn(newSn); - } + IOManager4File(); + ~IOManager4File() = default; + + /** + * 初始化函数 + * @param: filename为当前iomanager服务的文件名 + * @param: ioopt为当前iomanager的配置信息 + * @param: mdsclient向下透传给metacache + * @return: 成功true,失败false + */ + bool Initialize(const std::string& filename, const IOOption_t& ioOpt, + MDSClient* mdsclient); + + /** + * 同步模式读 + * @param: buf为当前待读取的缓冲区 + * @param:offset文件内的便宜 + * @parma:length为待读取的长度 + * @param: mdsclient透传给底层,在必要的时候与mds通信 + * @return: 成功返回读取真实长度,-1为失败 + */ + int Read(char* buf, off_t offset, size_t length, MDSClient* mdsclient); + /** + * 同步模式写 + * @param: mdsclient透传给底层,在必要的时候与mds通信 + * @param: buf为当前待写入的缓冲区 + * @param:offset文件内的便宜 + * @param:length为待读取的长度 + * @return: 成功返回写入真实长度,-1为失败 + */ + int Write(const char* buf, off_t offset, size_t length, + MDSClient* mdsclient); + /** + * 异步模式读 + * @param: mdsclient透传给底层,在必要的时候与mds通信 + * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * @param dataType type of aioctx->buf + * @return: 0为成功,小于0为失败 + */ + int AioRead(CurveAioContext* aioctx, MDSClient* mdsclient, + UserDataType dataType); + /** + * 异步模式写 + * @param: mdsclient透传给底层,在必要的时候与mds通信 + * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * @param dataType type of aioctx->buf + * @return: 0为成功,小于0为失败 + */ + int AioWrite(CurveAioContext* aioctx, MDSClient* mdsclient, + UserDataType dataType); + + /** + * 析构,回收资源 + */ + void UnInitialize(); + + /** + * @brief 获取rpc发送令牌 + */ + void GetInflightRpcToken() override; + + /** + * @brief 释放rpc发送令牌 + */ + void 
ReleaseInflightRpcToken() override; + + /** + * 获取metacache,测试代码使用 + */ + MetaCache* GetMetaCache() { + return &mc_; + } + /** + * 设置scahuler,测试代码使用 + */ + void SetRequestScheduler(RequestScheduler* scheduler) { + scheduler_ = scheduler; + } - private: - friend class LeaseExecutor; - friend class FlightIOGuard; - /** - * lease相关接口,当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO - * 将新下发的IO全部失败返回 - */ - void LeaseTimeoutBlockIO(); - - /** - * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO - */ - void RefeshSuccAndResumeIO(); - - /** - * 当lesaeexcutor发现版本变更,调用该接口开始等待inflight回来,这段期间IO是hang的 - */ - void BlockIO(); - - /** - * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO - * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker, - * HandleAsyncIOResponse负责释放IOTracker - * @param: iotracker是返回的异步io - */ - void HandleAsyncIOResponse(IOTracker* iotracker) override; - - class FlightIOGuard { - public: - explicit FlightIOGuard(IOManager4File* iomana) { - iomanager = iomana; - iomanager->inflightCntl_.IncremInflightNum(); + /** + * 获取metric信息,测试代码使用 + */ + FileMetric* GetMetric() { + return fileMetric_; + } + + /** + * 重新设置io配置信息,测试使用 + */ + void SetIOOpt(const IOOption_t& opt) { + ioopt_ = opt; + } + + /** + * 测试使用,获取request scheduler + */ + RequestScheduler* GetScheduler() { + return scheduler_; } - ~FlightIOGuard() { - iomanager->inflightCntl_.DecremInflightNum(); + /** + * lease excutor在检查到版本更新的时候,需要通知iomanager更新文件版本信息 + * @param: fi为当前需要更新的文件信息 + */ + void UpdateFileInfo(const FInfo_t& fi); + + const FInfo* GetFileInfo() const { + return mc_.GetFileInfo(); + } + + /** + * 返回文件最新版本号 + */ + uint64_t GetLatestFileSn() const { + return mc_.GetLatestFileSn(); } - private: - IOManager4File* iomanager; - }; + /** + * 更新文件最新版本号 + */ + void SetLatestFileSn(uint64_t newSn) { + mc_.SetLatestFileSn(newSn); + } private: - // 每个IOManager都有其IO配置,保存在iooption里 - IOOption_t ioopt_; + friend class LeaseExecutor; + friend class FlightIOGuard; + /** + * lease相关接口,当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO + * 将新下发的IO全部失败返回 + */ + void LeaseTimeoutBlockIO(); + + /** + * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO + */ + void RefeshSuccAndResumeIO(); + + /** + * 当lesaeexcutor发现版本变更,调用该接口开始等待inflight回来,这段期间IO是hang的 + */ + void BlockIO(); + + /** + * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO + * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker, + * HandleAsyncIOResponse负责释放IOTracker + * @param: iotracker是返回的异步io + */ + void HandleAsyncIOResponse(IOTracker* iotracker) override; + + class FlightIOGuard { + public: + explicit FlightIOGuard(IOManager4File* iomana) { + iomanager = iomana; + iomanager->inflightCntl_.IncremInflightNum(); + } + + ~FlightIOGuard() { + iomanager->inflightCntl_.DecremInflightNum(); + } + + private: + IOManager4File* iomanager; + }; - // metacache存储当前文件的所有元数据信息 - MetaCache mc_; + private: + // 每个IOManager都有其IO配置,保存在iooption里 + IOOption_t ioopt_; + + // metacache存储当前文件的所有元数据信息 + MetaCache mc_; - // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放 - RequestScheduler* scheduler_; + // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放 + RequestScheduler* scheduler_; - // client端metric统计信息 - FileMetric* fileMetric_; + // client端metric统计信息 + FileMetric* fileMetric_; - // task thread pool为了将qemu线程与curve线程隔离 - curve::common::TaskThreadPool taskPool_; + // task thread pool为了将qemu线程与curve线程隔离 + curve::common::TaskThreadPool taskPool_; - // inflight IO控制 - InflightControl inflightCntl_; + // inflight IO控制 + InflightControl inflightCntl_; - // inflight rpc控制 - InflightControl inflightRpcCntl_; + // inflight rpc控制 + 
InflightControl inflightRpcCntl_; - // 是否退出 - bool exit_; + // 是否退出 + bool exit_; - // lease续约线程与qemu一侧线程调用是并发的 - // qemu在调用close的时候会关闭iomanager及其对应 - // 资源。lease续约线程在续约成功或失败的时候会通知iomanager的 - // scheduler线程现在需要block IO或者resume IO,所以 - // 如果在lease续约线程需要通知iomanager的时候,这时候 - // 如果iomanager的资源scheduler已经被释放了,就会 - // 导致crash,所以需要对这个资源加一把锁,在退出的时候 - // 不会有并发的情况,保证在资源被析构的时候lease续约 - // 线程不会再用到这些资源. - std::mutex exitMtx_; + // lease续约线程与qemu一侧线程调用是并发的 + // qemu在调用close的时候会关闭iomanager及其对应 + // 资源。lease续约线程在续约成功或失败的时候会通知iomanager的 + // scheduler线程现在需要block IO或者resume IO,所以 + // 如果在lease续约线程需要通知iomanager的时候,这时候 + // 如果iomanager的资源scheduler已经被释放了,就会 + // 导致crash,所以需要对这个资源加一把锁,在退出的时候 + // 不会有并发的情况,保证在资源被析构的时候lease续约 + // 线程不会再用到这些资源. + std::mutex exitMtx_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve + #endif // SRC_CLIENT_IOMANAGER4FILE_H_ diff --git a/src/client/libcurve_client.cpp b/src/client/libcurve_client.cpp index 3c3e2aeb36..3c0f6cf234 100644 --- a/src/client/libcurve_client.cpp +++ b/src/client/libcurve_client.cpp @@ -108,12 +108,14 @@ int64_t CurveClient::StatFile(const std::string& filename) { return rc == LIBCURVE_ERROR::OK ? fileStatInfo.length : rc; } -int CurveClient::AioRead(int fd, CurveAioContext* aioctx) { - return fileClient_->AioRead(fd, aioctx); +int CurveClient::AioRead(int fd, CurveAioContext* aioctx, + UserDataType dataType) { + return fileClient_->AioRead(fd, aioctx, dataType); } -int CurveClient::AioWrite(int fd, CurveAioContext* aioctx) { - return fileClient_->AioWrite(fd, aioctx); +int CurveClient::AioWrite(int fd, CurveAioContext* aioctx, + UserDataType dataType) { + return fileClient_->AioWrite(fd, aioctx, dataType); } void CurveClient::SetFileClient(FileClient* client) { diff --git a/src/client/libcurve_file.cpp b/src/client/libcurve_file.cpp index 81ef70c8ca..71743f8045 100644 --- a/src/client/libcurve_file.cpp +++ b/src/client/libcurve_file.cpp @@ -309,7 +309,8 @@ int FileClient::Write(int fd, const char* buf, off_t offset, size_t len) { return fileserviceMap_[fd]->Write(buf, offset, len); } -int FileClient::AioRead(int fd, CurveAioContext* aioctx) { +int FileClient::AioRead(int fd, CurveAioContext* aioctx, + UserDataType dataType) { // 长度为0,直接返回,不做任何操作 if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; @@ -325,13 +326,14 @@ int FileClient::AioRead(int fd, CurveAioContext* aioctx) { LOG(ERROR) << "invalid fd!"; ret = -LIBCURVE_ERROR::BAD_FD; } else { - ret = fileserviceMap_[fd]->AioRead(aioctx); + ret = fileserviceMap_[fd]->AioRead(aioctx, dataType); } return ret; } -int FileClient::AioWrite(int fd, CurveAioContext* aioctx) { +int FileClient::AioWrite(int fd, CurveAioContext* aioctx, + UserDataType dataType) { // 长度为0,直接返回,不做任何操作 if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; @@ -347,7 +349,7 @@ int FileClient::AioWrite(int fd, CurveAioContext* aioctx) { LOG(ERROR) << "invalid fd!"; ret = -LIBCURVE_ERROR::BAD_FD; } else { - ret = fileserviceMap_[fd]->AioWrite(aioctx); + ret = fileserviceMap_[fd]->AioWrite(aioctx, dataType); } return ret; diff --git a/src/client/libcurve_file.h b/src/client/libcurve_file.h index 58c4c1b3d8..74d47b4fac 100644 --- a/src/client/libcurve_file.h +++ b/src/client/libcurve_file.h @@ -137,17 +137,21 @@ class FileClient { * 异步模式读 * @param: fd为当前open返回的文件描述符 * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` * @return: 成功返回读取字节数,否则返回小于0的错误码 */ - virtual int AioRead(int fd, CurveAioContext* aioctx); + virtual int 
AioRead(int fd, CurveAioContext* aioctx, + UserDataType dataType = UserDataType::RawBuffer); /** * 异步模式写 * @param: fd为当前open返回的文件描述符 * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` * @return: 成功返回写入字节数,否则返回小于0的错误码 */ - virtual int AioWrite(int fd, CurveAioContext* aioctx); + virtual int AioWrite(int fd, CurveAioContext* aioctx, + UserDataType dataType = UserDataType::RawBuffer); /** * 重命名文件 diff --git a/src/client/request_context.cpp b/src/client/request_context.cpp index 76e06fc9ef..5732c4525a 100644 --- a/src/client/request_context.cpp +++ b/src/client/request_context.cpp @@ -20,38 +20,12 @@ * Author: tongguangxun */ -#include -#include - #include "src/client/request_context.h" -#include "src/client/request_closure.h" namespace curve { namespace client { -std::atomic RequestContext::reqCtxID_(1); - -RequestContext::RequestContext() { - readBuffer_ = nullptr; - writeBuffer_ = nullptr; - chunkinfodetail_ = nullptr; - - id_ = reqCtxID_.fetch_add(1); - - seq_ = 0; - offset_ = 0; - rawlength_ = 0; - - appliedindex_ = 0; -} -bool RequestContext::Init() { - done_ = new (std::nothrow) RequestClosure(this); - return done_ != nullptr; -} - -void RequestContext::UnInit() { - delete done_; -} +std::atomic RequestContext::requestId(0); } // namespace client } // namespace curve diff --git a/src/client/request_context.h b/src/client/request_context.h index 2ad7bd35ca..f6ba3c3aae 100644 --- a/src/client/request_context.h +++ b/src/client/request_context.h @@ -23,6 +23,8 @@ #ifndef SRC_CLIENT_REQUEST_CONTEXT_H_ #define SRC_CLIENT_REQUEST_CONTEXT_H_ +#include + #include #include @@ -52,49 +54,66 @@ inline std::ostream& operator<<(std::ostream& os, return os; } -class RequestContext { - public: - RequestContext(); +struct RequestContext { + RequestContext() : id_(GetNextRequestContextId()) {} + ~RequestContext() = default; - bool Init(); - void UnInit(); + + bool Init() { + done_ = new (std::nothrow) RequestClosure(this); + return done_ != nullptr; + } + + void UnInit() { + delete done_; + done_ = nullptr; + } // chunk的ID信息,sender在发送rpc的时候需要附带其ID信息 ChunkIDInfo idinfo_; // 用户IO被拆分之后,其小IO有自己的offset和length - off_t offset_; - OpType optype_; - size_t rawlength_; + off_t offset_ = 0; + OpType optype_ = OpType::UNKNOWN; + size_t rawlength_ = 0; - // 当前IO的数据,读请求时数据在readbuffer,写请求在writebuffer - char* readBuffer_; - const char* writeBuffer_; + // user's single io request will split into several requests + // subIoIndex_ is an index of serveral requests + uint32_t subIoIndex_ = 0; + + // read data of current request + butil::IOBuf readData_; + + // write data of current request + butil::IOBuf writeData_; // 因为RPC都是异步发送,因此在一个Request结束时,RPC回调调用当前的done // 来告知当前的request结束了 - RequestClosure* done_; + RequestClosure* done_ = nullptr; // request的版本信息 - uint64_t seq_; + uint64_t seq_ = 0; // appliedindex_表示当前IO是否走chunkserver端的raft协议,为0的时候走raft - uint64_t appliedindex_; + uint64_t appliedindex_ = 0; // 这个对应的GetChunkInfo的出参 - ChunkInfoDetail* chunkinfodetail_; + ChunkInfoDetail* chunkinfodetail_ = nullptr; // clone chunk请求需要携带源chunk的location及所需要创建的chunk的大小 - uint32_t chunksize_; + uint32_t chunksize_ = 0; std::string location_; RequestSourceInfo sourceInfo_; // create clone chunk时候用于修改chunk的correctedSn - uint64_t correctedSeq_; + uint64_t correctedSeq_ = 0; // 当前request context id - uint64_t id_; + uint64_t id_ = 0; + + static std::atomic requestId; - // request context id生成器 - static std::atomic reqCtxID_; + static uint64_t GetNextRequestContextId() { + 
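// std::memory_order_relaxed is sufficient for id generation: fetch_add is
// still atomic, so every context gets a unique id, and no surrounding
// memory accesses need to be ordered by the increment, which keeps this
// hot-path counter cheap on weakly ordered CPUs.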
return requestId.fetch_add(1, std::memory_order_relaxed); + } }; inline std::ostream& operator<<(std::ostream& os, @@ -104,6 +123,7 @@ inline std::ostream& operator<<(std::ostream& os, << ", chunk id = " << reqCtx.idinfo_.cid_ << ", offset = " << reqCtx.offset_ << ", length = " << reqCtx.rawlength_ + << ", sub-io index = " << reqCtx.subIoIndex_ << ", sn = " << reqCtx.seq_ << ", source info = " << reqCtx.sourceInfo_; @@ -112,4 +132,5 @@ inline std::ostream& operator<<(std::ostream& os, } // namespace client } // namespace curve + #endif // SRC_CLIENT_REQUEST_CONTEXT_H_ diff --git a/src/client/request_scheduler.cpp b/src/client/request_scheduler.cpp index 1528e0e37b..9099917841 100644 --- a/src/client/request_scheduler.cpp +++ b/src/client/request_scheduler.cpp @@ -87,7 +87,8 @@ int RequestScheduler::Fini() { return 0; } -int RequestScheduler::ScheduleRequest(const std::list requests) { //NOLINT +int RequestScheduler::ScheduleRequest( + const std::vector& requests) { if (running_.load(std::memory_order_acquire)) { /* TODO(wudemiao): 后期考虑 qos */ for (auto it : requests) { @@ -143,31 +144,23 @@ void RequestScheduler::Process() { brpc::ClosureGuard guard(req->done_); switch (req->optype_) { case OpType::READ: - DVLOG(9) << "Processing read request, buf header: " - << " buf: " << *(unsigned int*)req->readBuffer_; { req->done_->GetInflightRPCToken(); - client_.ReadChunk(req->idinfo_, - req->seq_, - req->offset_, - req->rawlength_, - req->appliedindex_, - req->sourceInfo_, - guard.release()); + client_.ReadChunk(req->idinfo_, req->seq_, req->offset_, + req->rawlength_, req->appliedindex_, + req->sourceInfo_, guard.release()); } break; case OpType::WRITE: DVLOG(9) << "Processing write request, buf header: " - << " buf: " << *(unsigned int*)req->writeBuffer_; + << " buf: " + << *(unsigned int*)(req->writeData_.fetch1()); { req->done_->GetInflightRPCToken(); - client_.WriteChunk(req->idinfo_, - req->seq_, - req->writeBuffer_, - req->offset_, - req->rawlength_, - req->sourceInfo_, - guard.release()); + client_.WriteChunk(req->idinfo_, req->seq_, + req->writeData_, req->offset_, + req->rawlength_, req->sourceInfo_, + guard.release()); } break; case OpType::READ_SNAP: diff --git a/src/client/request_scheduler.h b/src/client/request_scheduler.h index e6cd145d54..96153b0e4d 100644 --- a/src/client/request_scheduler.h +++ b/src/client/request_scheduler.h @@ -23,7 +23,7 @@ #ifndef SRC_CLIENT_REQUEST_SCHEDULER_H_ #define SRC_CLIENT_REQUEST_SCHEDULER_H_ -#include +#include #include "src/common/uncopyable.h" #include "src/client/config_info.h" @@ -85,7 +85,7 @@ class RequestScheduler : public Uncopyable { * @param requests:请求列表 * @return 0成功,-1失败 */ - virtual int ScheduleRequest(const std::list requests); + virtual int ScheduleRequest(const std::vector& requests); /** * 将request push到Scheduler处理 diff --git a/src/client/request_sender.cpp b/src/client/request_sender.cpp index 590f46c3e3..5e0d05344d 100644 --- a/src/client/request_sender.cpp +++ b/src/client/request_sender.cpp @@ -35,8 +35,6 @@ using curve::common::TimeUtility; namespace curve { namespace client { -static void EmptyDeleter(void* ptr) {} - int RequestSender::Init(const IOSenderOption_t& ioSenderOpt) { if (0 != channel_.Init(serverEndPoint_, NULL)) { LOG(ERROR) << "failed to init channel to server, id: " << chunkServerId_ @@ -96,7 +94,7 @@ int RequestSender::ReadChunk(ChunkIDInfo idinfo, int RequestSender::WriteChunk(ChunkIDInfo idinfo, uint64_t sn, - const char *buf, + const butil::IOBuf& data, off_t offset, size_t length, const 
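// Further down, the payload is attached with
// cntl->request_attachment().append(data), which links the IOBuf's blocks
// into the RPC attachment instead of copying bytes. The write payload thus
// travels user buffer -> IOBuf view -> brpc attachment with no memcpy,
// which is the zero-copy goal this patch is named after.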
RequestSourceInfo& sourceInfo, @@ -108,11 +106,11 @@ int RequestSender::WriteChunk(ChunkIDInfo idinfo, rc->SetStartTime(TimeUtility::GetTimeofDayUs()); DVLOG(9) << "Sending request, buf header: " - << " buf: " << *(unsigned int *)buf; + << " buf: " << *(unsigned int *)(data.fetch1()); brpc::Controller *cntl = new brpc::Controller(); cntl->set_timeout_ms( - std::max(rc->GetNextTimeoutMS(), - iosenderopt_.failRequestOpt.chunkserverRPCTimeoutMS)); + std::max(rc->GetNextTimeoutMS(), + iosenderopt_.failRequestOpt.chunkserverRPCTimeoutMS)); done->SetCntl(cntl); ChunkResponse *response = new ChunkResponse(); done->SetResponse(response); @@ -133,8 +131,7 @@ int RequestSender::WriteChunk(ChunkIDInfo idinfo, request.set_clonefileoffset(sourceInfo.cloneFileOffset); } - cntl->request_attachment().append_user_data( - const_cast(buf), length, EmptyDeleter); + cntl->request_attachment().append(data); ChunkService_Stub stub(&channel_); stub.WriteChunk(cntl, &request, response, doneGuard.release()); diff --git a/src/client/request_sender.h b/src/client/request_sender.h index e8d212c805..edcd742ac5 100644 --- a/src/client/request_sender.h +++ b/src/client/request_sender.h @@ -25,6 +25,7 @@ #include #include +#include #include @@ -81,7 +82,7 @@ class RequestSender { * 写Chunk * @param idinfo为chunk相关的id信息 * @param sn:文件版本号 - * @param buf:要写入的数据 + * @param data 要写入的数据 *@param offset:写的偏移 * @param length:写的长度 * @param sourceInfo 数据源信息 @@ -89,7 +90,7 @@ class RequestSender { */ int WriteChunk(ChunkIDInfo idinfo, uint64_t sn, - const char *buf, + const butil::IOBuf& data, off_t offset, size_t length, const RequestSourceInfo& sourceInfo, diff --git a/src/client/splitor.cpp b/src/client/splitor.cpp index 7e1d6c7e26..ae4a26bedd 100644 --- a/src/client/splitor.cpp +++ b/src/client/splitor.cpp @@ -35,23 +35,27 @@ namespace curve { namespace client { IOSplitOPtion_t Splitor::iosplitopt_; + void Splitor::Init(IOSplitOPtion_t ioSplitOpt) { iosplitopt_ = ioSplitOpt; LOG(INFO) << "io splitor init success!"; } -int Splitor::IO2ChunkRequests(IOTracker* iotracker, - MetaCache* mc, - std::list* targetlist, - const char* data, - off_t offset, - size_t length, - MDSClient* mdsclient, - const FInfo_t* fi) { - if (targetlist == nullptr|| data == nullptr || mdsclient == nullptr || + +int Splitor::IO2ChunkRequests(IOTracker* iotracker, MetaCache* mc, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, size_t length, + MDSClient* mdsclient, const FInfo_t* fi) { + if (targetlist == nullptr || mdsclient == nullptr || mc == nullptr || iotracker == nullptr || fi == nullptr) { return -1; } + if (iotracker->Optype() == OpType::WRITE && data == nullptr) { + return -1; + } + + targetlist->reserve(length / (iosplitopt_.fileIOSplitMaxSizeKB * 1024) + 1); + uint64_t chunksize = fi->chunksize; uint64_t startchunkindex = offset / chunksize; @@ -77,7 +81,7 @@ int Splitor::IO2ChunkRequests(IOTracker* iotracker, << ", chunkindex = " << startchunkindex << ", endchunkindex = " << endchunkindex; - if (!AssignInternal(iotracker, mc, targetlist, data + dataoff, + if (!AssignInternal(iotracker, mc, targetlist, data, off, len, mdsclient, fi, startchunkindex)) { LOG(ERROR) << "request split failed" << ", off = " << off @@ -101,17 +105,16 @@ int Splitor::IO2ChunkRequests(IOTracker* iotracker, } // this offset is begin by chunk -int Splitor::SingleChunkIO2ChunkRequests(IOTracker* iotracker, - MetaCache* mc, - std::list* targetlist, - const ChunkIDInfo_t idinfo, - const char* data, - off_t offset, - uint64_t length, - uint64_t seq) { - if (targetlist 
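// Splitting a write with IOBuf::cutn() below moves ownership of the first
// `len` bytes from the tracker's buffer into the sub-request instead of
// copying them; after the split loop the source IOBuf is empty and every
// sub-request holds references to its own slice. Illustration (sizes are
// only an example):
//
//     butil::IOBuf src;                      // holds the full payload
//     butil::IOBuf slice;
//     size_t nc = src.cutn(&slice, 65536);   // src shrinks by nc, no memcpy
//
// A short cutn() therefore means the caller supplied less data than
// `length`, which SingleChunkIO2ChunkRequests treats as a hard error.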
-    if (targetlist == nullptr || mc == nullptr ||
-        iotracker == nullptr || data == nullptr) {
-        return -1;
+int Splitor::SingleChunkIO2ChunkRequests(
+    IOTracker* iotracker, MetaCache* mc,
+    std::vector<RequestContext*>* targetlist, const ChunkIDInfo_t idinfo,
+    butil::IOBuf* data, off_t offset, uint64_t length, uint64_t seq) {
+    if (targetlist == nullptr || mc == nullptr || iotracker == nullptr) {
+        return -1;
+    }
+
+    if (iotracker->Optype() == OpType::WRITE && data == nullptr) {
+        return -1;
     }

     auto max_split_size_bytes = 1024 * iosplitopt_.fileIOSplitMaxSizeKB;
@@ -130,12 +133,16 @@ int Splitor::SingleChunkIO2ChunkRequests(IOTracker* iotracker,
         }

         newreqNode->seq_ = seq;
+
         if (iotracker->Optype() == OpType::WRITE) {
-            newreqNode->writeBuffer_ = data + off;
-        } else {
-            newreqNode->readBuffer_ = const_cast<char*>(data + off);
+            auto nc = data->cutn(&(newreqNode->writeData_), len);
+            if (nc != len) {
+                LOG(ERROR) << "IOBuf::cutn failed, expected: " << len
+                           << ", return: " << nc;
+                return -1;
+            }
         }
-        // newreqNode->data_ = data + off;
+
         newreqNode->offset_ = tempoff;
         newreqNode->rawlength_ = len;
         newreqNode->optype_ = iotracker->Optype();
@@ -157,21 +164,16 @@ int Splitor::SingleChunkIO2ChunkRequests(IOTracker* iotracker,
     return 0;
 }

-bool Splitor::AssignInternal(IOTracker* iotracker,
-                             MetaCache* mc,
-                             std::list<RequestContext*>* targetlist,
-                             const char* buf,
-                             off_t off,
-                             size_t len,
-                             MDSClient* mdsclient,
-                             const FInfo_t* fileinfo,
-                             ChunkIndex chunkidx) {
-    auto max_split_size_bytes = 1024 * iosplitopt_.fileIOSplitMaxSizeKB;
-
+bool Splitor::AssignInternal(IOTracker* iotracker, MetaCache* mc,
+                             std::vector<RequestContext*>* targetlist,
+                             butil::IOBuf* data, off_t off, size_t len,
+                             MDSClient* mdsclient, const FInfo_t* fileinfo,
+                             ChunkIndex chunkidx) {
     ChunkIDInfo_t chinfo;
     SegmentInfo segInfo;
     LogicalPoolCopysetIDInfo_t lpcsIDInfo;
-    MetaCacheErrorType chunkidxexist = mc->GetChunkInfoByIndex(chunkidx, &chinfo);  // NOLINT
+    MetaCacheErrorType chunkidxexist =
+        mc->GetChunkInfoByIndex(chunkidx, &chinfo);

     if (chunkidxexist == MetaCacheErrorType::CHUNKINFO_NOT_FOUND) {
         LIBCURVE_ERROR re = mdsclient->GetOrAllocateSegment(true,
@@ -222,46 +224,25 @@ bool Splitor::AssignInternal(IOTracker* iotracker,
         chunkidxexist = mc->GetChunkInfoByIndex(chunkidx, &chinfo);
     }

+
     if (chunkidxexist == MetaCacheErrorType::OK) {
         int ret = 0;
         auto appliedindex_ = mc->GetAppliedIndex(chinfo.lpid_, chinfo.cpid_);
-        std::list<RequestContext*> templist;
-        if (len > max_split_size_bytes) {
-            ret = SingleChunkIO2ChunkRequests(iotracker, mc, &templist, chinfo,
-                                              buf, off, len, fileinfo->seqnum);
-
-            for_each(templist.begin(), templist.end(), [&](RequestContext* it) {
-                it->appliedindex_ = appliedindex_;
-                it->sourceInfo_ =
-                    CalcRequestSourceInfo(iotracker, mc, chunkidx);
-            });
-
-            targetlist->insert(targetlist->end(), templist.begin(), templist.end());  // NOLINT
-        } else {
-            RequestContext* newreqNode = GetInitedRequestContext();
-            if (newreqNode == nullptr) {
-                return -1;
-            }
-            newreqNode->seq_ = fileinfo->seqnum;
-            if (iotracker->Optype() == OpType::WRITE) {
-                newreqNode->writeBuffer_ = buf;
-            } else {
-                newreqNode->readBuffer_ = const_cast<char*>(buf);
-            }
-            // newreqNode->data_ = buf;
-            newreqNode->offset_ = off;
-            newreqNode->rawlength_ = len;
-            newreqNode->optype_ = iotracker->Optype();
-            newreqNode->idinfo_ = chinfo;
-            newreqNode->appliedindex_ = appliedindex_;
-            newreqNode->sourceInfo_ =
-                CalcRequestSourceInfo(iotracker, mc, chunkidx);
-            newreqNode->done_->SetIOTracker(iotracker);
-
-            targetlist->push_back(newreqNode);
+        std::vector<RequestContext*> templist;
+        ret = SingleChunkIO2ChunkRequests(iotracker, mc, &templist, chinfo,
+                                          data, off, len, fileinfo->seqnum);
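+        // Editor's note: SingleChunkIO2ChunkRequests() slices the write
+        // payload with butil::IOBuf::cutn(&dst, n), which moves up to n bytes
+        // from the front of the source IOBuf into dst (sharing the underlying
+        // blocks by reference counting rather than copying the bytes) and
+        // returns the number of bytes actually moved. A minimal sketch of the
+        // idiom, with illustrative names only:
+        //
+        //   butil::IOBuf src;
+        //   src.append("abcdefgh", 8);          // pending payload
+        //   butil::IOBuf slice;
+        //   size_t nc = src.cutn(&slice, 4);    // slice == "abcd", src == "efgh"
+        //   CHECK_EQ(4UL, nc);                  // nc < 4 only if src ran short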
+
+        for (auto& ctx : templist) {
+            ctx->appliedindex_ = appliedindex_;
+            ctx->sourceInfo_ = CalcRequestSourceInfo(iotracker, mc, chunkidx);
         }
+
+        targetlist->insert(targetlist->end(), templist.begin(),
+                           templist.end());
+
         return ret == 0;
     }
+
     LOG(ERROR) << "can not find the chunk index info!"
                << ", chunk index = " << chunkidx;
     return false;
diff --git a/src/client/splitor.h b/src/client/splitor.h
index af6b0cd555..fcaf94fb01 100644
--- a/src/client/splitor.h
+++ b/src/client/splitor.h
@@ -22,7 +22,9 @@
 #ifndef SRC_CLIENT_SPLITOR_H_
 #define SRC_CLIENT_SPLITOR_H_

-#include <list>
+#include <butil/iobuf.h>
+
+#include <vector>
 #include

 #include "src/client/metacache.h"
@@ -43,7 +45,7 @@ class Splitor {
      * @param: iotracker context of the large IO being split
      * @param: mc metadata cache used during IO splitting
      * @param: targetlist holds the small IOs split from the large IO
-     * @param: data the data to be read or written
+     * @param: data the data to be written
      * @param: offset starting offset of the IO issued by the user
      * @param: length length of the data
      * @param: mdsclient used to query MDS when the metacache lookup fails
      */
     static int IO2ChunkRequests(IOTracker* iotracker,
                                 MetaCache* mc,
-                                std::list<RequestContext*>* targetlist,
-                                const char* data,
+                                std::vector<RequestContext*>* targetlist,
+                                butil::IOBuf* data,
                                 off_t offset,
                                 size_t length,
                                 MDSClient* mdsclient,
@@ -63,16 +65,16 @@
      * @param: mc metadata cache used during IO splitting
      * @param: targetlist holds the small IOs split from the large IO
      * @param: cid ID information of the current chunk
-     * @param: data the data to be read or written
+     * @param: data the data to be written
      * @param: offset offset within the current chunk
      * @param: length length of the data
      * @param: seq sequence (version) number of the current chunk
      */
     static int SingleChunkIO2ChunkRequests(IOTracker* iotracker,
                                            MetaCache* mc,
-                                           std::list<RequestContext*>* targetlist,
+                                           std::vector<RequestContext*>* targetlist,
                                            const ChunkIDInfo_t cid,
-                                           const char* data,
+                                           butil::IOBuf* data,
                                            off_t offset,
                                            size_t length,
                                            uint64_t seq);
@@ -94,7 +96,7 @@
      * @param: iotracker context of the large IO being split
      * @param: mc metadata cache used during IO splitting
      * @param: targetlist holds the small IOs split from the large IO
-     * @param: data the data to be read or written
+     * @param: data the data to be written
      * @param: offset starting offset of the IO issued by the user
      * @param: length length of the data
      * @param: mdsclient used to query MDS when the metacache lookup fails
      */
     static bool AssignInternal(IOTracker* iotracker,
                                MetaCache* mc,
-                               std::list<RequestContext*>* targetlist,
-                               const char* data,
+                               std::vector<RequestContext*>* targetlist,
+                               butil::IOBuf* data,
                                off_t offset,
                                uint64_t length,
                                MDSClient* mdsclient,
diff --git a/test/chunkserver/clone/clone_copyer_test.cpp b/test/chunkserver/clone/clone_copyer_test.cpp
index ecae5b2125..05dff1ca41 100644
--- a/test/chunkserver/clone/clone_copyer_test.cpp
+++ b/test/chunkserver/clone/clone_copyer_test.cpp
@@ -137,8 +137,9 @@ TEST_F(CloneCopyerTest, BasicTest) {
         context.location = "test:0@cs";
         EXPECT_CALL(*curveClient_, Open4ReadOnly("test", _))
             .WillOnce(Return(1));
-        EXPECT_CALL(*curveClient_, AioRead(_, _))
-            .WillOnce(Invoke([](int fd, CurveAioContext* context){
+        EXPECT_CALL(*curveClient_, AioRead(_, _, _))
+            .WillOnce(Invoke([](int fd, CurveAioContext* context,
+                                curve::client::UserDataType dataType) {
                 context->ret = 1024;
                 context->cb(context);
                 return LIBCURVE_ERROR::OK;
@@ -154,8 +155,9 @@ TEST_F(CloneCopyerTest, BasicTest) {
         context.location = "test:0@cs";
         EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _))
             .Times(0);
-        EXPECT_CALL(*curveClient_, AioRead(_, _))
-            .WillOnce(Invoke([](int fd, CurveAioContext* context){
+        EXPECT_CALL(*curveClient_, AioRead(_, _, _))
+            .WillOnce(Invoke([](int fd, CurveAioContext* context,
+                                curve::client::UserDataType dataType) {
                 context->ret = -1;
                 context->cb(context);
                 return LIBCURVE_ERROR::OK;
@@ -171,7 +173,7 @@ TEST_F(CloneCopyerTest, BasicTest) {
         context.location = "test2:0@cs";
         EXPECT_CALL(*curveClient_,
Open4ReadOnly("test2", _)) .WillOnce(Return(-1)); - EXPECT_CALL(*curveClient_, AioRead(_, _)) + EXPECT_CALL(*curveClient_, AioRead(_, _, _)) .Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); @@ -184,7 +186,7 @@ TEST_F(CloneCopyerTest, BasicTest) { context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _)) .WillOnce(Return(2)); - EXPECT_CALL(*curveClient_, AioRead(_, _)) + EXPECT_CALL(*curveClient_, AioRead(_, _, _)) .WillOnce(Return(-1 * LIBCURVE_ERROR::FAILED)); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); @@ -268,7 +270,7 @@ TEST_F(CloneCopyerTest, DisableTest) { context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _)) .Times(0); - EXPECT_CALL(*curveClient_, AioRead(_, _)) + EXPECT_CALL(*curveClient_, AioRead(_, _, _)) .Times(0); copyer.DownloadAsync(&closure); ASSERT_TRUE(closure.IsRun()); diff --git a/test/client/client_metric_test.cpp b/test/client/client_metric_test.cpp index fe9d7a45ef..3ecda775a8 100644 --- a/test/client/client_metric_test.cpp +++ b/test/client/client_metric_test.cpp @@ -272,7 +272,7 @@ TEST(MetricTest, SuspendRPC_MetricTest) { aioctx->op = LIBCURVE_OP_WRITE; aioctx->length = 4 * 1024; aioctx->cb = cb; - fi.AioWrite(aioctx); + fi.AioWrite(aioctx, UserDataType::RawBuffer); std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_EQ(fm->suspendRPCMetric.count.get_value(), 1); diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 38cbc646b7..bae5c6f22e 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -185,7 +185,7 @@ TEST(ClientSession, LeaseTaskTest) { ioSleepTime = TimeUtility::GetTimeofDayUs(); - ASSERT_EQ(0, fileinstance.AioRead(&aioctx2)); + ASSERT_EQ(0, fileinstance.AioRead(&aioctx2, UserDataType::RawBuffer)); std::this_thread::sleep_for(std::chrono::seconds(SLEEP_TIME_S)); diff --git a/test/client/copyset_client_test.cpp b/test/client/copyset_client_test.cpp index 7c60b7f613..d602f8fd2d 100644 --- a/test/client/copyset_client_test.cpp +++ b/test/client/copyset_client_test.cpp @@ -200,6 +200,9 @@ TEST_F(CopysetClientTest, normal_test) { buff2[8] = '\0'; off_t offset = 0; + butil::IOBuf iobuf; + iobuf.append(buff1, sizeof(buff1) - 1); + ChunkServerID leaderId = 10000; butil::EndPoint leaderAddr; std::string leaderStr = "127.0.0.1:9109"; @@ -207,6 +210,7 @@ TEST_F(CopysetClientTest, normal_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); + iot.PrepareReadIOBuffers(1); // write success for (int i = 0; i < 10; ++i) { @@ -214,8 +218,8 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); + reqCtx->writeData_ = iobuf; - reqCtx->writeBuffer_ = buff1; reqCtx->offset_ = i * 8; reqCtx->rawlength_ = len; @@ -236,7 +240,7 @@ TEST_F(CopysetClientTest, normal_test) { .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -246,8 +250,7 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = offset; reqCtx->rawlength_ = len; @@ -269,7 +272,7 @@ TEST_F(CopysetClientTest, normal_test) { 
.WillOnce(DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -279,8 +282,7 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = offset; reqCtx->rawlength_ = len; @@ -303,7 +305,7 @@ TEST_F(CopysetClientTest, normal_test) { .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -316,9 +318,9 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->seq_ = sn; - reqCtx->readBuffer_ = buff1; reqCtx->offset_ = i * 8; reqCtx->rawlength_ = len; + reqCtx->subIoIndex_ = 0; curve::common::CountDownEvent cond(1); RequestClosure *reqDone = new FakeRequestClosure(&cond, reqCtx); @@ -347,8 +349,7 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->readBuffer_ = buff1; + reqCtx->subIoIndex_ = 0; reqCtx->offset_ = offset; reqCtx->rawlength_ = len; @@ -380,8 +381,7 @@ TEST_F(CopysetClientTest, normal_test) { reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->readBuffer_ = buff1; + reqCtx->subIoIndex_ = 0; reqCtx->offset_ = offset; reqCtx->rawlength_ = len; @@ -452,6 +452,9 @@ TEST_F(CopysetClientTest, write_error_test) { buff2[8] = '\0'; off_t offset = 0; + butil::IOBuf iobuf; + iobuf.append(buff1, sizeof(buff1) - 1); + ChunkServerID leaderId = 10000; butil::EndPoint leaderAddr; std::string leaderStr = "127.0.0.1:9109"; @@ -466,8 +469,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -488,7 +490,7 @@ TEST_F(CopysetClientTest, write_error_test) { .WillOnce(DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, reqDone->GetErrorCode()); @@ -499,8 +501,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -521,7 +522,7 @@ TEST_F(CopysetClientTest, write_error_test) { EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3) .WillRepeatedly(Invoke(WriteChunkFunc)); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); @@ -535,8 +536,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -561,7 +561,7 @@ TEST_F(CopysetClientTest, 
write_error_test) { EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(3) .WillRepeatedly(Invoke(WriteChunkFunc)); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_NE(0, reqDone->GetErrorCode()); @@ -579,8 +579,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -606,13 +605,13 @@ TEST_F(CopysetClientTest, write_error_test) { .WillRepeatedly(DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, reqDone->GetErrorCode()); uint64_t end = TimeUtility::GetTimeofDayUs(); - ASSERT_GT(end - start, 29000); + ASSERT_GT(end - start, 28000); ASSERT_LT(end - start, 2 * 50000); gWriteCntlFailedCode = 0; } @@ -623,8 +622,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -644,7 +642,7 @@ TEST_F(CopysetClientTest, write_error_test) { .WillRepeatedly(DoAll(SetArgPointee<2>(response), Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); @@ -655,7 +653,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -682,7 +680,7 @@ TEST_F(CopysetClientTest, write_error_test) { .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -695,8 +693,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -722,7 +719,7 @@ TEST_F(CopysetClientTest, write_error_test) { Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -733,8 +730,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - - reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -763,7 +759,7 @@ TEST_F(CopysetClientTest, write_error_test) { .WillOnce(DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); copysetClient.WriteChunk(reqCtx->idinfo_, 0, - buff1, offset, len, {}, reqDone); + iobuf, offset, len, {}, reqDone); cond.Wait(); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); @@ -774,8 +770,7 
@@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeBuffer_ = buff1;
+        reqCtx->writeData_ = iobuf;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -801,13 +796,13 @@ TEST_F(CopysetClientTest, write_error_test) {
                             Invoke(WriteChunkFunc)));
         auto startTimeUs = curve::common::TimeUtility::GetTimeofDayUs();
         copysetClient.WriteChunk(reqCtx->idinfo_, 0,
-                                 buff1, offset, len, {}, reqDone);
+                                 iobuf, offset, len, {}, reqDone);
         cond.Wait();
         auto elpased = curve::common::TimeUtility::GetTimeofDayUs()
                      - startTimeUs;
         // chunkserverOPRetryIntervalUS = 5000
         // each redirect sleeps 500us and there are 3 retries in total, so the
         // overall elapsed time should exceed 1500us (asserted loosely below)
-        ASSERT_GE(elpased, 1500);
+        ASSERT_GE(elpased, 1000);
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED,
                   reqDone->GetErrorCode());
         ASSERT_EQ(3, fm.writeRPC.redirectQps.count.get_value());
@@ -818,8 +813,7 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeBuffer_ = buff1;
+        reqCtx->writeData_ = iobuf;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -840,7 +834,7 @@ TEST_F(CopysetClientTest, write_error_test) {
             .WillRepeatedly(DoAll(SetArgPointee<2>(response),
                                   Invoke(WriteChunkFunc)));
         copysetClient.WriteChunk(reqCtx->idinfo_, 0,
-                                 buff1, offset, len, {}, reqDone);
+                                 iobuf, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST,
                   reqDone->GetErrorCode());
@@ -851,8 +845,7 @@ TEST_F(CopysetClientTest, write_error_test) {
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeBuffer_ = buff1;
+        reqCtx->writeData_ = iobuf;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -878,7 +871,7 @@ TEST_F(CopysetClientTest, write_error_test) {
             .WillOnce(DoAll(SetArgPointee<2>(response2),
                             Invoke(WriteChunkFunc)));
         copysetClient.WriteChunk(reqCtx->idinfo_, 0,
-                                 buff1, offset, len, {}, reqDone);
+                                 iobuf, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
                   reqDone->GetErrorCode());
@@ -926,6 +919,8 @@ TEST_F(CopysetClientTest, write_failed_test) {
     buff1[8] = '\0';
     buff2[8] = '\0';
     off_t offset = 0;
+    butil::IOBuf iobuf;
+    iobuf.append(buff1, sizeof(buff1) - 1);

     ChunkServerID leaderId = 10000;
     butil::EndPoint leaderAddr;
@@ -941,8 +936,7 @@ TEST_F(CopysetClientTest, write_failed_test) {
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeBuffer_ = buff1;
+        reqCtx->writeData_ = iobuf;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -966,7 +960,7 @@ TEST_F(CopysetClientTest, write_failed_test) {
         EXPECT_CALL(mockChunkService, WriteChunk(_, _, _, _)).Times(50)
             .WillRepeatedly(Invoke(WriteChunkFunc));
         copysetClient.WriteChunk(reqCtx->idinfo_, 0,
-                                 buff1, offset, len, {}, reqDone);
+                                 iobuf, offset, len, {}, reqDone);
         cond.Wait();
         ASSERT_NE(0, reqDone->GetErrorCode());
@@ -983,8 +977,7 @@ TEST_F(CopysetClientTest, write_failed_test) {
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->writeBuffer_ = buff1;
+        reqCtx->writeData_ = iobuf;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1010,7 +1003,7 @@ TEST_F(CopysetClientTest, write_failed_test) {
             .WillRepeatedly(DoAll(SetArgPointee<2>(response),
                                   Invoke(WriteChunkFunc)));
         copysetClient.WriteChunk(reqCtx->idinfo_, 0,
-                                 buff1, offset, len, {}, reqDone);
+                                 iobuf, offset, len, {}, reqDone);
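+        // Editor's note: WriteChunk now takes `const butil::IOBuf&` in place
+        // of the old `const char* buf` plus length pair; the sender appends
+        // the IOBuf to the RPC attachment, which shares the underlying blocks
+        // by reference counting, so the payload survives retries without
+        // another byte copy. A minimal caller-side sketch (assuming a raw
+        // buffer `buf` of `len` bytes and a closure `done`; names are
+        // illustrative only):
+        //
+        //   butil::IOBuf payload;
+        //   payload.append(buf, len);   // copies user bytes into IOBuf blocks once
+        //   copysetClient.WriteChunk(idinfo, 0, payload, offset, len, {}, done);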
        cond.Wait();
         ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD,
                   reqDone->GetErrorCode());
@@ -1073,6 +1066,7 @@ TEST_F(CopysetClientTest, read_failed_test) {

     FileMetric fm("test");
     IOTracker iot(nullptr, nullptr, nullptr, &fm);
+    iot.PrepareReadIOBuffers(1);

     /* controller set timeout */
     {
         RequestContext *reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1123,8 +1117,7 @@ TEST_F(CopysetClientTest, read_failed_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1213,6 +1206,7 @@ TEST_F(CopysetClientTest, read_error_test) {

     FileMetric fm("test");
     IOTracker iot(nullptr, nullptr, nullptr, &fm);
+    iot.PrepareReadIOBuffers(1);

     /* invalid parameters */
     {
         RequestContext *reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1252,8 +1245,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1283,7 +1275,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1310,7 +1302,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         ASSERT_NE(0, reqDone->GetErrorCode());

         uint64_t end = TimeUtility::GetTimeofDayUs();
-        ASSERT_GT(end - start, 1400);
+        ASSERT_GT(end - start, 1000);
         gReadCntlFailedCode = 0;
     }

@@ -1320,7 +1312,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1364,8 +1356,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1407,8 +1398,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1439,8 +1429,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1478,8 +1467,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1521,8 +1509,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
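+        // Editor's note: with IOBuf-based reads the request no longer carries
+        // a readBuffer_; iot.PrepareReadIOBuffers(1) above reserves one IOBuf
+        // slot on the tracker, and subIoIndex_ names the slot this
+        // sub-request's response data fills, which is presumably how
+        // out-of-order RPC completions are stitched back together in offset
+        // order before the result is handed to the user.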
        reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1564,8 +1551,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1599,8 +1585,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1632,8 +1617,7 @@ TEST_F(CopysetClientTest, read_error_test) {
         reqCtx->optype_ = OpType::READ;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
-        reqCtx->readBuffer_ = buff1;
+        reqCtx->subIoIndex_ = 0;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1723,7 +1707,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1756,7 +1739,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1819,7 +1801,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1852,7 +1833,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1892,7 +1872,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1929,7 +1908,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -1970,7 +1948,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -2006,7 +1983,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -2040,7 +2016,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -2078,7 +2053,6 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) {
         reqCtx->seq_ = sn;

-        reqCtx->readBuffer_ = buff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -3639,7 +3613,7 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) {
     aioctx->cb = WriteCallBack;

     // issue the write request
-    fileinstance.AioWrite(aioctx);
+    fileinstance.AioWrite(aioctx, UserDataType::RawBuffer);

     std::this_thread::sleep_for(std::chrono::seconds(sec));
     return gWriteSuccessFlag;
@@ -3698,7 +3672,9 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) {
     CopysetID copysetId = 100001;
     ChunkID chunkId = 1;
     size_t len = 8;
-    char buff1[8 + 1] = {0};
+    char buff1[8] = {0};
+    butil::IOBuf iobuf;
+    iobuf.append(buff1, len);
     off_t offset = 0;

     ChunkServerID leaderId = 10000;
@@ -3716,7 +3692,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) {
         RequestContext* reqCtx = new FakeRequestContext();
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-        reqCtx->writeBuffer_ = buff1;
+        // reqCtx->writeBuffer_ = buff1;
+        reqCtx->writeData_ = iobuf;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len;

@@ -3750,7
+3727,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, 0, buff1, offset, len, {}, + copysetClient.WriteChunk(reqCtx->idinfo_, 0, iobuf, offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); @@ -3767,7 +3744,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->writeBuffer_ = buff1; + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -3798,7 +3776,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce( DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, 0, buff1, offset, len, {}, + copysetClient.WriteChunk(reqCtx->idinfo_, 0, iobuf, offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); @@ -3815,7 +3793,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->writeBuffer_ = buff1; + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -3845,7 +3824,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce( DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, 0, buff1, offset, len, {}, + copysetClient.WriteChunk(reqCtx->idinfo_, 0, iobuf, offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); @@ -3862,7 +3841,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->writeBuffer_ = buff1; + // reqCtx->writeBuffer_ = buff1; + reqCtx->writeData_ = iobuf; reqCtx->offset_ = 0; reqCtx->rawlength_ = len; @@ -3890,7 +3870,7 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { .WillOnce( DoAll(SetArgPointee<2>(response2), Invoke(WriteChunkFunc))); auto startUs = curve::common::TimeUtility::GetTimeofDayUs(); - copysetClient.WriteChunk(reqCtx->idinfo_, 0, buff1, offset, len, {}, + copysetClient.WriteChunk(reqCtx->idinfo_, 0, iobuf, offset, len, {}, reqDone); cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); @@ -3945,7 +3925,7 @@ TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { .Times(1); TestRunnedRequestClosure closure; - copysetClient.WriteChunk({}, 0, 0, 0, 0, {}, &closure); + copysetClient.WriteChunk({}, 0, {}, 0, 0, {}, &closure); ASSERT_FALSE(closure.IsRunned()); } } diff --git a/test/client/fake/BUILD b/test/client/fake/BUILD index 365c400cc5..ab41c126c4 100644 --- a/test/client/fake/BUILD +++ b/test/client/fake/BUILD @@ -49,6 +49,7 @@ cc_binary( "//external:leveldb", "//external:brpc", "//external:braft", + "//external:butil", "//external:protobuf", "//src/common:curve_common", "//include/client:include_client", diff --git a/test/client/fake/mock_schedule.cpp b/test/client/fake/mock_schedule.cpp index 67e6719bc7..3ea2fd66a9 100644 --- 
a/test/client/fake/mock_schedule.cpp +++ b/test/client/fake/mock_schedule.cpp @@ -21,6 +21,8 @@ */ #include +#include + #include #include "test/client/fake/mock_schedule.h" @@ -35,90 +37,82 @@ struct datastruct { char* data; }; +butil::IOBuf writeData; char* writebuffer; int Schedule::ScheduleRequest( - const std::list reqlist) { - // LOG(INFO) << "ENTER MOCK ScheduleRequest"; - char fakedate[10] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k'}; - curve::client::OpType type = curve::client::OpType::UNKNOWN; - int size = reqlist.size(); - int processed = 0; - int totallength = 0; - std::vector datavec; - LOG(ERROR) << size; + const std::vector& reqlist) { + // LOG(INFO) << "ENTER MOCK ScheduleRequest"; + char fakedate[10] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k'}; + curve::client::OpType type = curve::client::OpType::UNKNOWN; + int size = reqlist.size(); + int processed = 0; + int totallength = 0; + std::vector datavec; + LOG(ERROR) << size; - if (enableScheduleFailed) { - return -1; - } + if (enableScheduleFailed) { + return -1; + } - fiu_do_on("client_request_schedule_sleep", - auto func = [&] () { - LOG(INFO) << "start sleep! " << sleeptimeMS << " ms"; - std::this_thread::sleep_for( - std::chrono::milliseconds(sleeptimeMS)); + fiu_do_on("client_request_schedule_sleep", + auto func = + [&]() { + LOG(INFO) << "start sleep! " << sleeptimeMS << " ms"; + std::this_thread::sleep_for( + std::chrono::milliseconds(sleeptimeMS)); }; - func();); + func();); + + for (auto iter : reqlist) { + auto req = iter->done_->GetReqCtx(); + if (iter->optype_ == curve::client::OpType::READ_SNAP) { + char buf[iter->rawlength_]; // NOLINT + memset(buf, fakedate[processed % 10], iter->rawlength_); + iter->readData_.append(buf, iter->rawlength_); + } - for (auto iter : reqlist) { - auto req = iter->done_->GetReqCtx(); - if (iter->optype_ == curve::client::OpType::READ_SNAP) { - memset(iter->readBuffer_, - fakedate[processed%10], - iter->rawlength_); - } + if (iter->optype_ == curve::client::OpType::GET_CHUNK_INFO) { + req->seq_ = 1111; + req->chunkinfodetail_->chunkSn.push_back(2222); + } + + if (iter->optype_ == curve::client::OpType::READ) { + char buffer[iter->rawlength_]; // NOLINT + memset(buffer, fakedate[processed % 10], iter->rawlength_); + iter->readData_.append(buffer, iter->rawlength_); - if (iter->optype_ == curve::client::OpType::GET_CHUNK_INFO) { - req->seq_ = 1111; - req->chunkinfodetail_->chunkSn.push_back(2222); - } + // LOG(ERROR) << "request split" + // << ", off = " << iter->offset_ + // << ", len = " << iter->rawlength_ + // << ", seqnum = " << iter->seq_ + // << ", chunkindex = " << iter->idinfo_.cid_ + // << ", content = " << fakedate[processed%10] + // << ", address = " << &(iter->readBuffer_); + } - if (iter->optype_ == curve::client::OpType::READ) { - memset(iter->readBuffer_, - fakedate[processed%10], - iter->rawlength_); - // LOG(ERROR) << "request split" - // << ", off = " << iter->offset_ - // << ", len = " << iter->rawlength_ - // << ", seqnum = " << iter->seq_ - // << ", chunkindex = " << iter->idinfo_.cid_ - // << ", content = " << fakedate[processed%10] - // << ", address = " << &(iter->readBuffer_); - } + if (iter->optype_ == curve::client::OpType::WRITE) { + type = curve::client::OpType::WRITE; + writeData.append(iter->writeData_); + } + processed++; + // LOG(INFO) << "current request context chunkID : " + // << iter->idinfo_.cid_ + // << ", copyset id = " + // << iter->idinfo_.cpid_ + // << ", logic pool id =" + // << iter->idinfo_.lpid_ + // << ", offset = " + // 
<< iter->offset_ + // << ", length = " + // << iter->rawlength_; - if (iter->optype_ == curve::client::OpType::WRITE) { - type = curve::client::OpType::WRITE; - datastruct datas; - datas.length = iter->rawlength_; - datas.data = const_cast(iter->writeBuffer_); - totallength += iter->rawlength_; - datavec.push_back(datas); - } - processed++; - // LOG(INFO) << "current request context chunkID : " - // << iter->idinfo_.cid_ - // << ", copyset id = " - // << iter->idinfo_.cpid_ - // << ", logic pool id =" - // << iter->idinfo_.lpid_ - // << ", offset = " - // << iter->offset_ - // << ", length = " - // << iter->rawlength_; - if (processed >= size) { - if (type == curve::client::OpType::WRITE) { - writebuffer = new char[totallength]; - uint32_t tempoffert = 0; - for (auto it : datavec) { - memcpy(writebuffer + tempoffert, it.data, it.length); - tempoffert += it.length; - } - } - iter->done_->SetFailed(0); - iter->done_->Run(); - break; - } + if (processed >= size) { iter->done_->SetFailed(0); iter->done_->Run(); + break; } - return 0; + iter->done_->SetFailed(0); + iter->done_->Run(); + } + return 0; } diff --git a/test/client/fake/mock_schedule.h b/test/client/fake/mock_schedule.h index 4594ce8053..703c1112a6 100644 --- a/test/client/fake/mock_schedule.h +++ b/test/client/fake/mock_schedule.h @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include // NOLINT #include // NOLINT @@ -50,15 +50,15 @@ class Schedule { } int ScheduleRequest( - const std::list reqlist); + const std::vector& reqlist); bool enableScheduleFailed; }; class MockRequestScheduler : public curve::client::RequestScheduler { public: - using REQ = std::list; - MOCK_METHOD1(ScheduleRequest, int(const REQ)); + using REQ = std::vector; + MOCK_METHOD1(ScheduleRequest, int(const REQ&)); void DelegateToFake() { ON_CALL(*this, ScheduleRequest(_)) diff --git a/test/client/iotracker_splitor_unittest.cpp b/test/client/iotracker_splitor_unittest.cpp index 7e86e57430..3250e9d288 100644 --- a/test/client/iotracker_splitor_unittest.cpp +++ b/test/client/iotracker_splitor_unittest.cpp @@ -20,57 +20,43 @@ * Author: tongguangxun */ -#include -#include -#include -#include #include #include +#include +#include +#include +#include +#include //NOLINT +#include //NOLINT +#include // NOLINT #include -#include //NOLINT -#include //NOLINT -#include // NOLINT -#include //NOLINT +#include //NOLINT -#include "src/client/config_info.h" -#include "test/client/fake/mock_schedule.h" -#include "src/client/io_tracker.h" -#include "src/client/splitor.h" -#include "src/client/request_context.h" -#include "test/client/fake/mockMDS.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" +#include "src/client/config_info.h" #include "src/client/file_instance.h" -#include "src/client/metacache.h" +#include "src/client/io_tracker.h" #include "src/client/iomanager4file.h" #include "src/client/libcurve_file.h" -#include "src/client/client_config.h" #include "src/client/mds_client.h" +#include "src/client/metacache.h" #include "src/client/metacache_struct.h" +#include "src/client/request_context.h" +#include "src/client/splitor.h" #include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mockMDS.h" +#include "test/client/fake/mock_schedule.h" extern std::string mdsMetaServerAddr; extern uint32_t chunk_size; extern std::string configpath; -extern char* writebuffer; - -using curve::client::UserInfo_t; -using curve::client::CopysetInfo_t; -using curve::client::SegmentInfo; -using curve::client::FInfo_t; -using 
curve::client::MDSClient; -using curve::client::ClientConfig; -using curve::client::FileInstance; -using curve::client::IOTracker; -using curve::client::MetaCache; -using curve::client::RequestContext; -using curve::client::IOManager4File; -using curve::client::LogicalPoolCopysetIDInfo_t; -using curve::client::FileMetric; -using curve::client::OpType; -using curve::client::ChunkIDInfo; -using curve::client::Splitor; +extern butil::IOBuf writeData; + +namespace curve { +namespace client { bool ioreadflag = false; std::mutex readmtx; @@ -121,6 +107,8 @@ class IOTrackerSplitorTest : public ::testing::Test { } void TearDown() { + writeData.clear(); + fileinstance_->UnInitialize(); mdsclient_.UnInitialize(); delete fileinstance_; @@ -288,8 +276,8 @@ TEST_F(IOTrackerSplitorTest, AsyncStartRead) { iomana->SetRequestScheduler(mockschuler); CurveAioContext aioctx; - aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; - aioctx.length = 4 * 1024 * 1024 + 8 * 1024; + aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; // 4M - 4k + aioctx.length = 4 * 1024 * 1024 + 8 * 1024; // 4M + 8K aioctx.ret = LIBCURVE_ERROR::OK; aioctx.cb = readcallback; aioctx.buf = new char[aioctx.length]; @@ -297,7 +285,7 @@ TEST_F(IOTrackerSplitorTest, AsyncStartRead) { ioreadflag = false; char* data = static_cast(aioctx.buf); - iomana->AioRead(&aioctx, &mdsclient_); + iomana->AioRead(&aioctx, &mdsclient_, UserDataType::RawBuffer); { std::unique_lock lk(readmtx); @@ -339,13 +327,17 @@ TEST_F(IOTrackerSplitorTest, AsyncStartWrite) { fi.chunksize = 4 * 1024 * 1024; fi.segmentsize = 1 * 1024 * 1024 * 1024ul; iowriteflag = false; - iomana->AioWrite(&aioctx, &mdsclient_); + iomana->AioWrite(&aioctx, &mdsclient_, UserDataType::RawBuffer); { std::unique_lock lk(writemtx); - writecv.wait(lk, []()->bool{return iowriteflag;}); + writecv.wait(lk, []() -> bool { return iowriteflag; }); } + std::unique_ptr writebuffer(new char[aioctx.length]); + memcpy(writebuffer.get(), writeData.to_string().c_str(), aioctx.length); + + // check butil::IOBuf write data ASSERT_EQ('a', writebuffer[0]); ASSERT_EQ('a', writebuffer[4 * 1024 - 1]); ASSERT_EQ('b', writebuffer[4 * 1024]); @@ -412,6 +404,9 @@ TEST_F(IOTrackerSplitorTest, StartWrite) { process.join(); } + std::unique_ptr writebuffer(new char[length]); + memcpy(writebuffer.get(), writeData.to_string().c_str(), length); + ASSERT_EQ('a', writebuffer[0]); ASSERT_EQ('a', writebuffer[4 * 1024 - 1]); ASSERT_EQ('b', writebuffer[4 * 1024]); @@ -438,7 +433,7 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartRead) { ioreadflag = false; char* data = static_cast(aioctx->buf); - ioctxmana->AioRead(aioctx, &mdsclient_); + ioctxmana->AioRead(aioctx, &mdsclient_, UserDataType::RawBuffer); { std::unique_lock lk(readmtx); @@ -475,13 +470,16 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWrite) { memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024); iowriteflag = false; - ioctxmana->AioWrite(aioctx, &mdsclient_); + ioctxmana->AioWrite(aioctx, &mdsclient_, UserDataType::RawBuffer); { std::unique_lock lk(writemtx); writecv.wait(lk, []()->bool{return iowriteflag;}); } + std::unique_ptr writebuffer(new char[aioctx->length]); + memcpy(writebuffer.get(), writeData.to_string().c_str(), aioctx->length); + ASSERT_EQ('a', writebuffer[0]); ASSERT_EQ('a', writebuffer[4 * 1024 - 1]); ASSERT_EQ('b', writebuffer[4 * 1024]); @@ -648,6 +646,9 @@ TEST_F(IOTrackerSplitorTest, ManagerStartWrite) { process.join(); } + std::unique_ptr writebuffer(new char[length]); + memcpy(writebuffer.get(), writeData.to_string().c_str(), length); + ASSERT_EQ('a', 
writebuffer[0]); ASSERT_EQ('a', writebuffer[4 * 1024 - 1]); ASSERT_EQ('b', writebuffer[4 * 1024]); @@ -685,17 +686,15 @@ TEST_F(IOTrackerSplitorTest, ExceptionTest_TEST) { auto waitfunc = [&]() { int retlen = iotracker->Wait(); - ASSERT_EQ(-1 * LIBCURVE_ERROR::FAILED, retlen); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, retlen); }; auto threadfunc = [&]() { - iotracker->StartWrite(nullptr, - nullptr, - offset, - length, - &mdsclient_, - &fi); + iotracker->SetUserDataType(UserDataType::RawBuffer); + iotracker->StartWrite( + nullptr, offset, length, &mdsclient_, &fi); }; + std::thread process(threadfunc); std::thread waitthread(waitfunc); @@ -754,12 +753,15 @@ TEST_F(IOTrackerSplitorTest, largeIOTest) { /** * this offset and length will make splitor split into two 8k IO. */ - uint64_t length = 2 * 64 * 1024; - uint64_t offset = 4 * 1024 * 1024 - length; + uint64_t length = 2 * 64 * 1024; // 128KB + uint64_t offset = 4 * 1024 * 1024 - length; // 4MB - 128KB char* buf = new char[length]; - memset(buf, 'a', 64 * 1024); - memset(buf + 64 * 1024, 'b', 64 * 1024); + + memset(buf, 'a', 64 * 1024); // 64KB + memset(buf + 64 * 1024, 'b', 64 * 1024); // 64KB + butil::IOBuf writeData; + writeData.append(buf, length); FInfo_t fi; fi.seqnum = 0; fi.chunksize = 4 * 1024 * 1024; @@ -768,14 +770,16 @@ TEST_F(IOTrackerSplitorTest, largeIOTest) { MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); IOTracker* iotracker = new IOTracker(iomana, mc, &mockschuler); + iotracker->SetOpType(OpType::WRITE); curve::client::ChunkIDInfo chinfo(1, 2, 3); mc->UpdateChunkInfoByIndex(0, chinfo); - std::list reqlist; + std::vector reqlist; + auto dataCopy = writeData; ASSERT_EQ(0, curve::client::Splitor::IO2ChunkRequests(iotracker, mc, &reqlist, - buf, + &dataCopy, offset, length, &mdsclient_, @@ -783,14 +787,16 @@ TEST_F(IOTrackerSplitorTest, largeIOTest) { ASSERT_EQ(2, reqlist.size()); RequestContext* first = reqlist.front(); - reqlist.pop_front(); + reqlist.erase(reqlist.begin()); RequestContext* second = reqlist.front(); - reqlist.pop_front(); + reqlist.erase(reqlist.begin()); - for (int i = 0; i < 64 * 1024; i++) { - ASSERT_EQ(97, (char)(*(first->readBuffer_ + i))); - ASSERT_EQ(98, (char)(*(second->readBuffer_ + i))); - } + // first 64KB is 'a' + // seconds 64KB is 'b' + butil::IOBuf splitData; + splitData.append(first->writeData_); + splitData.append(second->writeData_); + ASSERT_EQ(writeData, splitData); ASSERT_EQ(1, first->idinfo_.cid_); ASSERT_EQ(3, first->idinfo_.cpid_); @@ -814,90 +820,64 @@ TEST_F(IOTrackerSplitorTest, InvalidParam) { uint64_t length = 2 * 64 * 1024; uint64_t offset = 4 * 1024 * 1024 - length; char* buf = new char[length]; + butil::IOBuf iobuf; MetaCache* mc = fileinstance_->GetIOManager4File()->GetMetaCache(); - std::list reqlist; + std::vector reqlist; FInfo_t fi; IOTracker* iotracker = new IOTracker(nullptr, nullptr, nullptr); curve::client::ChunkIDInfo cid(0, 0, 0); - ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(nullptr, mc, - &reqlist, - buf, - offset, - length, - &mdsclient_, - &fi)); - ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests(nullptr, mc, // NOLINT - &reqlist, - cid, - buf, - offset, - length, - 0)); - ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(iotracker, nullptr, - &reqlist, - buf, - offset, - length, - &mdsclient_, - nullptr)); - ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests(iotracker, nullptr, // NOLINT - &reqlist, - cid, - buf, - offset, - length, - 0)); - ASSERT_EQ(-1, 
curve::client::Splitor::IO2ChunkRequests(iotracker, mc, - &reqlist, - buf, - offset, - length, - &mdsclient_, - nullptr)); - ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(iotracker, mc, - &reqlist, - buf, - offset, - length, - nullptr, - &fi)); - ASSERT_EQ(0, curve::client::Splitor::SingleChunkIO2ChunkRequests(iotracker, mc, // NOLINT - &reqlist, - cid, - buf, - offset, - length, - 0)); - ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(iotracker, mc, - nullptr, - buf, - offset, - length, - &mdsclient_, - nullptr)); - ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests(iotracker, mc, // NOLINT - nullptr, - cid, - buf, - offset, - length, - 0)); - ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests(iotracker, mc, - &reqlist, - nullptr, - offset, - length, - &mdsclient_, - nullptr)); - ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests(iotracker, mc, // NOLINT - &reqlist, - cid, - nullptr, - offset, - length, - 0)); + ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( + nullptr, mc, &reqlist, &iobuf, offset, length, + &mdsclient_, &fi)); + + ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( + nullptr, mc, + &reqlist, cid, &iobuf, offset, length, 0)); + + ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( + iotracker, nullptr, &reqlist, &iobuf, offset, length, + &mdsclient_, nullptr)); + + ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( + iotracker, nullptr, + &reqlist, cid, &iobuf, offset, length, 0)); + + ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( + iotracker, mc, &reqlist, &iobuf, offset, length, + &mdsclient_, nullptr)); + + ASSERT_EQ( + -1, curve::client::Splitor::IO2ChunkRequests( + iotracker, mc, &reqlist, &iobuf, offset, length, nullptr, &fi)); + + ASSERT_EQ(0, curve::client::Splitor::SingleChunkIO2ChunkRequests( + iotracker, mc, + &reqlist, cid, &iobuf, offset, length, 0)); + + ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( + iotracker, mc, nullptr, &iobuf, offset, length, + &mdsclient_, nullptr)); + + ASSERT_EQ(-1, curve::client::Splitor::SingleChunkIO2ChunkRequests( + iotracker, mc, + nullptr, cid, &iobuf, offset, length, 0)); + + ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( + iotracker, mc, &reqlist, nullptr, offset, length, + &mdsclient_, nullptr)); + + iotracker->SetOpType(OpType::WRITE); + ASSERT_EQ(-1, + curve::client::Splitor::SingleChunkIO2ChunkRequests( + iotracker, mc, &reqlist, cid, nullptr, offset, length, 0)); + + // write request, but write data is nullptr + iotracker->SetOpType(OpType::WRITE); + ASSERT_EQ(-1, curve::client::Splitor::IO2ChunkRequests( + iotracker, mc, &reqlist, nullptr, offset, length, + &mdsclient_, &fi)); delete iotracker; delete[] buf; @@ -967,3 +947,6 @@ TEST(SplitorTest, RequestSourceInfoTest) { ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); } + +} // namespace client +} // namespace curve diff --git a/test/client/lease_excutor_test.cpp b/test/client/lease_executor_test.cpp similarity index 86% rename from test/client/lease_excutor_test.cpp rename to test/client/lease_executor_test.cpp index ac88fa6da1..d2612417df 100644 --- a/test/client/lease_excutor_test.cpp +++ b/test/client/lease_executor_test.cpp @@ -35,6 +35,15 @@ namespace curve { namespace client { +extern std::string mdsMetaServerAddr; +extern uint32_t chunk_size; +extern std::string configpath; +extern curve::client::FileClient* globalclient; + +using curve::mds::CurveFSService; +using 
curve::mds::topology::TopologyService; +using curve::mds::topology::GetChunkServerListInCopySetsResponse; + TEST(LeaseExecutorBaseTest, test_StartFailed) { UserInfo_t userInfo; MDSClient mdsClient; diff --git a/test/client/libcurve_client_unittest.cpp b/test/client/libcurve_client_unittest.cpp index 4455758011..46b2348699 100644 --- a/test/client/libcurve_client_unittest.cpp +++ b/test/client/libcurve_client_unittest.cpp @@ -41,11 +41,10 @@ extern std::string configpath; namespace curve { namespace client { -using curve::client::EndPoint; using ::testing::_; using ::testing::Return; -const uint32_t kBufSize = 4 * 1024; +const uint32_t kBufSize = 128 * 1024; // 128KB const uint64_t kFileSize = 10ul * 1024 * 1024 * 1024; const uint64_t kNewSize = 20ul * 1024 * 1024 * 1024; const char* kFileName = "1_userinfo_test.img"; @@ -148,14 +147,14 @@ TEST_F(CurveClientTest, AioReadWriteTest) { memset(buffer, 'a', kBufSize); event.Reset(1); - ASSERT_EQ(0, client_.AioWrite(fd, &aioctx)); + ASSERT_EQ(0, client_.AioWrite(fd, &aioctx, UserDataType::RawBuffer)); event.Wait(); ASSERT_EQ(aioctx.ret, aioctx.length); aioctx.op = LIBCURVE_OP_READ; memset(buffer, '0', kBufSize); event.Reset(1); - ASSERT_EQ(0, client_.AioRead(fd, &aioctx)); + ASSERT_EQ(0, client_.AioRead(fd, &aioctx, UserDataType::RawBuffer)); event.Wait(); ASSERT_EQ(aioctx.ret, aioctx.length); diff --git a/test/client/libcurve_interface_unittest.cpp b/test/client/libcurve_interface_unittest.cpp index 7f8f4dcbc4..a830eb1113 100644 --- a/test/client/libcurve_interface_unittest.cpp +++ b/test/client/libcurve_interface_unittest.cpp @@ -20,43 +20,37 @@ * Author: tongguangxun */ -#include +#include #include #include -#include -#include -#include -#include //NOLINT -#include // NOLINT +#include +#include // NOLINT #include // NOLINT -#include // NOLINT +#include +#include // NOLINT +#include +#include //NOLINT #include "include/client/libcurve.h" +#include "src/client/chunk_closure.h" +#include "src/client/client_common.h" #include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" -#include "test/client/fake/fakeMDS.h" #include "src/client/libcurve_file.h" -#include "src/client/client_common.h" -#include "src/client/chunk_closure.h" - -using curve::client::MetaCacheErrorType; -using curve::client::ChunkIDInfo_t; -using curve::client::ChunkServerAddr; -using curve::client::MetaCache; -using curve::client::UserInfo_t; -using curve::client::EndPoint; -using curve::client::MDSClient; -using curve::client::ClientConfig; -using curve::client::FileInstance; -using curve::client::CopysetInfo_t; -using curve::client::CopysetIDInfo; -using curve::client::FileClient; -using curve::client::FInfo; +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" extern std::string configpath; extern uint32_t chunk_size; extern uint32_t segment_size; +DECLARE_string(chunkserver_list); +DECLARE_uint32(logic_pool_id); +DECLARE_uint32(copyset_num); +DECLARE_uint64(test_disk_size); + +namespace curve { +namespace client { + bool writeflag = false; bool readflag = false; std::mutex writeinterfacemtx; @@ -64,15 +58,12 @@ std::condition_variable writeinterfacecv; std::mutex interfacemtx; std::condition_variable interfacecv; -DECLARE_string(chunkserver_list); -DECLARE_uint32(logic_pool_id); -DECLARE_uint32(copyset_num); -DECLARE_uint64(test_disk_size); void writecallbacktest(CurveAioContext* context) { writeflag = true; writeinterfacecv.notify_one(); LOG(INFO) << "aio call back here, errorcode = " << context->ret; } + void 
readcallbacktest(CurveAioContext* context) { readflag = true; interfacecv.notify_one(); @@ -956,3 +947,6 @@ TEST(TestLibcurveInterface, ResumeTimeoutBackoff) { mds.UnInitialize(); delete[] buffer; } + +} // namespace client +} // namespace curve diff --git a/test/client/mock_file_client.h b/test/client/mock_file_client.h index 692b7333a5..0a92009591 100644 --- a/test/client/mock_file_client.h +++ b/test/client/mock_file_client.h @@ -41,8 +41,8 @@ class MockFileClient : public FileClient { MOCK_METHOD2(Open4ReadOnly, int(const std::string&, const UserInfo_t&)); MOCK_METHOD4(Read, int(int, char*, off_t, size_t)); MOCK_METHOD4(Write, int(int, const char*, off_t, size_t)); - MOCK_METHOD2(AioRead, int(int, CurveAioContext*)); - MOCK_METHOD2(AioWrite, int(int, CurveAioContext*)); + MOCK_METHOD3(AioRead, int(int, CurveAioContext*, UserDataType)); + MOCK_METHOD3(AioWrite, int(int, CurveAioContext*, UserDataType)); MOCK_METHOD3(StatFile, int(const std::string&, const UserInfo_t&, FileStatInfo*)); diff --git a/test/client/mock_request_context.h b/test/client/mock_request_context.h index f43f8e7051..e9a82498cf 100644 --- a/test/client/mock_request_context.h +++ b/test/client/mock_request_context.h @@ -57,12 +57,15 @@ class FakeRequestClosure : public RequestClosure { cond_->Signal(); } } + void SetFailed(int err) override { errcode_ = err; } + int GetErrorCode() override { return errcode_; } + RequestContext *GetReqCtx() override { return reqCtx_; } diff --git a/test/client/request_scheduler_test.cpp b/test/client/request_scheduler_test.cpp index e7cde81440..84799ddbc9 100644 --- a/test/client/request_scheduler_test.cpp +++ b/test/client/request_scheduler_test.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include "src/client/request_scheduler.h" #include "src/client/client_common.h" @@ -97,7 +98,7 @@ TEST(RequestSchedulerTest, fake_server_test) { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->writeBuffer_ = writebuff; + reqCtx->writeData_.append(writebuff, len); reqCtx->offset_ = offset; reqCtx->rawlength_ = len; @@ -107,7 +108,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::list reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(-1, requestScheduler.ScheduleRequest(reqCtxs)); } @@ -115,7 +116,7 @@ TEST(RequestSchedulerTest, fake_server_test) { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->writeBuffer_ = writebuff; + reqCtx->writeData_.append(writebuff, len); reqCtx->offset_ = offset; reqCtx->rawlength_ = len; @@ -142,6 +143,8 @@ TEST(RequestSchedulerTest, fake_server_test) { writebuff1[16] = '\0'; readbuff1[16] = '\0'; cmpbuff1[16] = '\0'; + butil::IOBuf expectReadData; + expectReadData.append(cmpbuff1, 16); const uint64_t len1 = 16; /* write should with attachment size */ { @@ -149,10 +152,8 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; - - reqCtx->writeBuffer_ = writebuff1; + reqCtx->writeData_.append(writebuff1, len1); reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; @@ -162,7 +163,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::list reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); 
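+        // Editor's note: write payloads are now staged per request with
+        // reqCtx->writeData_.append(buf, len), and read results can be
+        // verified by comparing IOBufs directly, since butil::IOBuf defines
+        // operator== as a byte-wise content comparison (the later assertions
+        // in this test use exactly that). An illustrative check, not part of
+        // the original test:
+        //
+        //   butil::IOBuf expected;
+        //   expected.append(cmpbuff, len);
+        //   ASSERT_EQ(reqCtx->readData_, expected);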
ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -172,9 +173,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->optype_ = OpType::READ; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - memset(readbuff1, '0', 16); - reqCtx->readBuffer_ = readbuff1; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; @@ -184,11 +183,11 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::list reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); - ASSERT_STREQ(reqCtx->readBuffer_, cmpbuff1); + // ASSERT_STREQ(reqCtx->readBuffer_, cmpbuff1); ASSERT_EQ(0, reqDone->GetErrorCode()); } { @@ -200,7 +199,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->seq_ = sn; ::memset(writebuff1, 'a', 8); ::memset(writebuff1 + 8, '\0', 8); - reqCtx->writeBuffer_ = writebuff1; + reqCtx->writeData_.append(writebuff1, len1); reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; @@ -210,7 +209,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::list reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); @@ -223,7 +222,6 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->seq_ = sn; memset(readbuff1, '0', 16); - reqCtx->readBuffer_ = readbuff1; reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; @@ -233,26 +231,30 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->SetIOTracker(&iot); reqCtx->done_ = reqDone; - std::list reqCtxs; + std::vector reqCtxs; reqCtxs.push_back(reqCtx); ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); - ASSERT_EQ(reqCtx->readBuffer_[0], 'a'); - ASSERT_EQ(reqCtx->readBuffer_[1], 'a'); - ASSERT_EQ(reqCtx->readBuffer_[2], 'a'); - ASSERT_EQ(reqCtx->readBuffer_[3], 'a'); - ASSERT_EQ(reqCtx->readBuffer_[4], 'a'); - ASSERT_EQ(reqCtx->readBuffer_[5], 'a'); - ASSERT_EQ(reqCtx->readBuffer_[6], 'a'); - ASSERT_EQ(reqCtx->readBuffer_[7], 'a'); - ASSERT_EQ(reqCtx->readBuffer_[8], '\0'); - ASSERT_EQ(reqCtx->readBuffer_[9], '\0'); - ASSERT_EQ(reqCtx->readBuffer_[10], '\0'); - ASSERT_EQ(reqCtx->readBuffer_[11], '\0'); - ASSERT_EQ(reqCtx->readBuffer_[12], '\0'); - ASSERT_EQ(reqCtx->readBuffer_[13], '\0'); - ASSERT_EQ(reqCtx->readBuffer_[14], '\0'); - ASSERT_EQ(reqCtx->readBuffer_[15], '\0'); + std::unique_ptr readData(new char[len1]); + memcpy(readData.get(), reqCtx->readData_.to_string().c_str(), len1); + ASSERT_EQ(readData[0], 'a'); + ASSERT_EQ(readData[1], 'a'); + ASSERT_EQ(readData[2], 'a'); + ASSERT_EQ(readData[3], 'a'); + ASSERT_EQ(readData[4], 'a'); + ASSERT_EQ(readData[5], 'a'); + ASSERT_EQ(readData[6], 'a'); + ASSERT_EQ(readData[7], 'a'); + ASSERT_EQ(readData[8], '\0'); + ASSERT_EQ(readData[9], '\0'); + ASSERT_EQ(readData[10], '\0'); + ASSERT_EQ(readData[11], '\0'); + ASSERT_EQ(readData[12], '\0'); + ASSERT_EQ(readData[13], '\0'); + ASSERT_EQ(readData[14], '\0'); + ASSERT_EQ(readData[15], '\0'); + + // ASSERT_EQ(reqCtx->readData_, expectReadData); ASSERT_EQ(0, reqDone->GetErrorCode()); } @@ -263,11 +265,10 @@ TEST(RequestSchedulerTest, fake_server_test) { reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); - reqCtx->seq_ = sn; ::memset(writebuff1, 'a', 16); - reqCtx->writeBuffer_ = writebuff1; + reqCtx->writeData_.append(writebuff1, len1); reqCtx->offset_ = 0; reqCtx->rawlength_ = len1; @@ -277,7 +278,7 @@ 
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
@@ -291,7 +292,6 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqCtx->seq_ = sn;
         memset(readbuff1, '0', 16);
-        reqCtx->readBuffer_ = readbuff1;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len1;
@@ -301,11 +301,13 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
-        ASSERT_STREQ(reqCtx->readBuffer_, cmpbuff1);
+        butil::IOBuf expectReadData;
+        expectReadData.append(cmpbuff1, len1);
+        ASSERT_EQ(reqCtx->readData_, expectReadData);
         ASSERT_EQ(0, reqDone->GetErrorCode());
     }
     // 3. then delete the snapshot
@@ -325,7 +327,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
@@ -348,7 +350,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
@@ -363,7 +365,6 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqCtx->optype_ = OpType::GET_CHUNK_INFO;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
         reqCtx->chunkinfodetail_ = &chunkInfo;

         curve::common::CountDownEvent cond(1);
@@ -372,7 +373,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
@@ -389,7 +390,6 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqCtx->optype_ = OpType::CREATE_CLONE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-
         reqCtx->seq_ = sn;
         reqCtx->offset_ = 0;
         reqCtx->rawlength_ = len1;
@@ -401,7 +401,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
@@ -425,7 +425,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
@@ -439,9 +439,8 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqCtx->optype_ = OpType::WRITE;
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
-        reqCtx->seq_ = sn;
-        reqCtx->writeBuffer_ = writebuff;
+        reqCtx->writeData_.append(writebuff, len);
         reqCtx->offset_ = offset + i;
         reqCtx->rawlength_ = len;
@@ -451,7 +450,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
@@ -465,7 +464,6 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqCtx->seq_ = sn;
         memset(readbuff, '0', 8);
-        reqCtx->readBuffer_ = readbuff;
         reqCtx->offset_ = offset + i;
         reqCtx->rawlength_ = len;
@@ -475,11 +473,13 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
-        ASSERT_STREQ(reqCtx->readBuffer_, cmpbuff);
+        butil::IOBuf expectReadData;
+        expectReadData.append(cmpbuff, len);
+        ASSERT_EQ(reqCtx->readData_, expectReadData);
         ASSERT_EQ(0, reqDone->GetErrorCode());
     }
     {
@@ -489,7 +489,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         memset(readbuff, '0', 8);
-        reqCtx->readBuffer_ = readbuff;
+        // reqCtx->readBuffer_ = readbuff;
         reqCtx->offset_ = offset;
         reqCtx->rawlength_ = len;
@@ -499,7 +499,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqDone->SetIOTracker(&iot);
         reqCtx->done_ = reqDone;
-        std::list<RequestContext*> reqCtxs;
+        std::vector<RequestContext*> reqCtxs;
         reqCtxs.push_back(reqCtx);
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs));
         cond.Wait();
@@ -515,7 +515,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId);
         reqCtx->seq_ = sn;
-        reqCtx->writeBuffer_ = writebuff;
+        reqCtx->writeData_.append(writebuff, len);
         reqCtx->offset_ = offset + i;
         reqCtx->rawlength_ = len;
@@ -546,7 +546,7 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqCtx->seq_ = sn;
         memset(readbuff, '0', 8);
-        reqCtx->readBuffer_ = readbuff;
+        // reqCtx->readBuffer_ = readbuff;
         reqCtx->offset_ = offset + i;
         reqCtx->rawlength_ = len;
         curve::common::CountDownEvent cond(1);
@@ -556,7 +556,9 @@ TEST(RequestSchedulerTest, fake_server_test) {
         reqCtx->done_ = reqDone;
         ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtx));
         cond.Wait();
-        ASSERT_STREQ(reqCtx->readBuffer_, cmpbuff);
+        butil::IOBuf expectReadData;
+        expectReadData.append(cmpbuff, len);
+        ASSERT_EQ(reqCtx->readData_, expectReadData);
         ASSERT_EQ(0, reqDone->GetErrorCode());
     }
diff --git a/test/client/request_sender_test.cpp b/test/client/request_sender_test.cpp
index 34b232ddfd..4ec9df1654 100644
--- a/test/client/request_sender_test.cpp
+++ b/test/client/request_sender_test.cpp
@@ -174,7 +174,7 @@ TEST_F(RequestSenderTest, TestWriteChunkSourceInfo) {
         FakeChunkClosure closure(&event);
         sourceInfo.cloneFileSource.clear();

-        requestSender.WriteChunk(ChunkIDInfo(), 0, 0, 0, 0,
+        requestSender.WriteChunk(ChunkIDInfo(), 0, {}, 0, 0,
                                  sourceInfo, &closure);

         event.Wait();
@@ -195,7 +195,7 @@ TEST_F(RequestSenderTest, TestWriteChunkSourceInfo) {
         sourceInfo.cloneFileSource = "/test_WriteChunkSourceInfo";
         sourceInfo.cloneFileOffset = 0;

-        requestSender.WriteChunk(ChunkIDInfo(), 0, 0, 0, 0,
+        requestSender.WriteChunk(ChunkIDInfo(), 0, {}, 0, 0,
                                  sourceInfo, &closure);

         event.Wait();
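
Reviewer note: the test changes above all follow the same migration, replacing the raw readBuffer_/writeBuffer_ pointers in RequestContext with butil::IOBuf readData_/writeData_. The `{}` argument in the WriteChunk calls above presumably default-constructs an empty IOBuf in place of the old raw-buffer argument. Below is a minimal standalone sketch of the IOBuf idioms the migrated tests rely on; the function and variable names here are illustrative and not part of the patch:

    #include <butil/iobuf.h>

    #include <cstring>
    #include <memory>

    // Sketch (hypothetical helper) of the three IOBuf idioms used above.
    void IOBufIdioms() {
        char payload[16];
        memset(payload, 'a', sizeof(payload));

        // writeData_-style usage: append() copies the bytes into the IOBuf.
        butil::IOBuf writeData;
        writeData.append(payload, sizeof(payload));

        // readData_-style verification: IOBuf supports equality comparison,
        // so one ASSERT_EQ on two IOBufs replaces byte-wise asserts on a
        // raw buffer (and, unlike ASSERT_STREQ, also checks embedded NULs).
        butil::IOBuf expected;
        expected.append(payload, sizeof(payload));
        bool equal = (writeData == expected);
        (void)equal;

        // Flatten into a contiguous buffer when byte-level access is still
        // needed; copy_to() avoids the extra copy of going through
        // to_string().
        std::unique_ptr<char[]> flat(new char[writeData.size()]);
        writeData.copy_to(flat.get(), writeData.size());
    }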