Unverified commit 890fe08e, authored by Wang XiangYu and committed by GitHub

Refactor LOG macro and improve log format (#1927)

* update log format
Signed-off-by: wxyu <xy.wang@zilliz.com>

* add new log macro
Signed-off-by: wxyu <xy.wang@zilliz.com>

* use new log macro instead
Signed-off-by: Xiangyu Wang <xy.wang@zilliz.com>

* add SetThreadName function
Signed-off-by: wxyu <xy.wang@zilliz.com>

* clang-format
Signed-off-by: wxyu <xy.wang@zilliz.com>

* set thread name
Signed-off-by: Xiangyu Wang <xy.wang@zilliz.com>

* add changelog
Signed-off-by: wxyu <xy.wang@zilliz.com>

* add git ignore .swp file
Signed-off-by: wxyu <xy.wang@zilliz.com>

* update log level in LogUtil.cpp
Signed-off-by: wxyu <xy.wang@zilliz.com>
Parent 407cedd7
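
Taken together, the changes rename the logging macros from the old SERVER_LOG_* / ENGINE_LOG_* family to a module-first LOG_SERVER_*_ / LOG_ENGINE_*_ family (visible throughout the diff below), simplify the global log format, and add a SetThreadName helper so log lines can be attributed to named threads. The new Log.h itself is collapsed in this view, so the following is only a minimal sketch of what the renamed macros and the helper might look like, assuming the easylogging++ backend that the .conf files below configure; the macro bodies and the pthread-based implementation are assumptions, not the commit's actual code.

    // Sketch only: illustrative macro bodies, not the real Log.h from this commit.
    #include <easylogging++.h>
    #include <pthread.h>
    #include <string>

    // Old naming: SERVER_LOG_ERROR, ENGINE_LOG_DEBUG.
    // New naming: module prefix first, trailing underscore.
    #define LOG_SERVER_ERROR_ LOG(ERROR) << "[SERVER] "
    #define LOG_ENGINE_DEBUG_ LOG(DEBUG) << "[ENGINE] "

    // Assumed shape of the SetThreadName helper added by this commit: name the
    // calling thread so it can be told apart in logs and in tools such as
    // top -H (Linux truncates thread names to 15 characters).
    void SetThreadName(const std::string& name) {
        pthread_setname_np(pthread_self(), name.c_str());
    }
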
......@@ -32,3 +32,4 @@ cov_html/
# temp
shards/all_in_one_with_mysql/metadata/
shards/mishards/.env
*.swp
......@@ -2,6 +2,17 @@
Please mark all changes in the change log and use the issue number from GitHub
# Milvus 0.9.0 (TBD)
## Bug
## Feature
## Improvement
- \#221 Refactor LOG macro
## Task
# Milvus 0.8.0 (TBD)
## Bug
......
* GLOBAL:
FORMAT = "%datetime | %level | %logger | %msg"
FORMAT = "[%datetime][%level]%msg"
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-global.log"
ENABLED = true
TO_FILE = true
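
The two FORMAT lines above are the before and after of this change: the pipe-delimited pattern that included the logger name is replaced by a more compact bracketed one. Assuming easylogging++ datetime rendering, the same message would change from roughly

    2020-04-13 10:15:30,123 | DEBUG | default | Insert into segment

to

    [2020-04-13 10:15:30,123][DEBUG] Insert into segment

(the timestamp and message here are invented for illustration).
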
......@@ -24,4 +24,4 @@
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-error.log"
* FATAL:
ENABLED = true
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-fatal.log"
\ No newline at end of file
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-fatal.log"
......@@ -76,7 +76,7 @@ bool
Cache<ItemObj>::reserve(const int64_t item_size) {
std::lock_guard<std::mutex> lock(mutex_);
if (item_size > capacity_) {
SERVER_LOG_ERROR << header_ << " item size " << (item_size >> 20) << "MB too big to insert into cache capacity"
LOG_SERVER_ERROR_ << header_ << " item size " << (item_size >> 20) << "MB too big to insert into cache capacity"
<< (capacity_ >> 20) << "MB";
return false;
}
......@@ -92,7 +92,7 @@ Cache<ItemObj>::clear() {
std::lock_guard<std::mutex> lock(mutex_);
lru_.clear();
usage_ = 0;
SERVER_LOG_DEBUG << header_ << " Clear cache !";
LOG_SERVER_DEBUG_ << header_ << " Clear cache !";
}
......@@ -102,9 +102,9 @@ Cache<ItemObj>::print() {
std::lock_guard<std::mutex> lock(mutex_);
size_t cache_count = lru_.size();
// for (auto it = lru_.begin(); it != lru_.end(); ++it) {
// SERVER_LOG_DEBUG << it->first;
// LOG_SERVER_DEBUG_ << it->first;
// }
SERVER_LOG_DEBUG << header_ << " [item count]: " << cache_count << ", [usage] " << (usage_ >> 20)
LOG_SERVER_DEBUG_ << header_ << " [item count]: " << cache_count << ", [usage] " << (usage_ >> 20)
<< "MB, [capacity] " << (capacity_ >> 20) << "MB";
}
......@@ -128,15 +128,15 @@ Cache<ItemObj>::insert_internal(const std::string& key, const ItemObj& item) {
// if usage exceed capacity, free some items
if (usage_ > capacity_) {
SERVER_LOG_DEBUG << header_ << " Current usage " << (usage_ >> 20) << "MB is too high for capacity "
LOG_SERVER_DEBUG_ << header_ << " Current usage " << (usage_ >> 20) << "MB is too high for capacity "
<< (capacity_ >> 20) << "MB, start free memory";
free_memory_internal(capacity_);
}
// insert new item
lru_.put(key, item);
SERVER_LOG_DEBUG << header_ << " Insert " << key << " size: " << (item_size >> 20) << "MB into cache";
SERVER_LOG_DEBUG << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
LOG_SERVER_DEBUG_ << header_ << " Insert " << key << " size: " << (item_size >> 20) << "MB into cache";
LOG_SERVER_DEBUG_ << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
<< (capacity_ >> 20) << "MB";
}
......@@ -153,8 +153,8 @@ Cache<ItemObj>::erase_internal(const std::string& key) {
lru_.erase(key);
usage_ -= item_size;
SERVER_LOG_DEBUG << header_ << " Erase " << key << " size: " << (item_size >> 20) << "MB from cache";
SERVER_LOG_DEBUG << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
LOG_SERVER_DEBUG_ << header_ << " Erase " << key << " size: " << (item_size >> 20) << "MB from cache";
LOG_SERVER_DEBUG_ << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
<< (capacity_ >> 20) << "MB";
}
......@@ -180,7 +180,7 @@ Cache<ItemObj>::free_memory_internal(const int64_t target_size) {
++it;
}
SERVER_LOG_DEBUG << header_ << " To be released memory size: " << (released_size >> 20) << "MB";
LOG_SERVER_DEBUG_ << header_ << " To be released memory size: " << (released_size >> 20) << "MB";
for (auto& key : key_array) {
erase_internal(key);
......
......@@ -24,7 +24,7 @@ template <typename ItemObj>
uint64_t
CacheMgr<ItemObj>::ItemCount() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return 0;
}
return (uint64_t)(cache_->size());
......@@ -34,7 +34,7 @@ template <typename ItemObj>
bool
CacheMgr<ItemObj>::ItemExists(const std::string& key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return false;
}
return cache_->exists(key);
......@@ -44,7 +44,7 @@ template <typename ItemObj>
ItemObj
CacheMgr<ItemObj>::GetItem(const std::string& key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return nullptr;
}
server::Metrics::GetInstance().CacheAccessTotalIncrement();
......@@ -55,7 +55,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::InsertItem(const std::string& key, const ItemObj& data) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->insert(key, data);
......@@ -66,7 +66,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::EraseItem(const std::string& key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->erase(key);
......@@ -77,7 +77,7 @@ template <typename ItemObj>
bool
CacheMgr<ItemObj>::Reserve(const int64_t size) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return false;
}
return cache_->reserve(size);
......@@ -87,7 +87,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::PrintInfo() {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->print();
......@@ -97,7 +97,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::ClearCache() {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->clear();
......@@ -107,7 +107,7 @@ template <typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheUsage() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return 0;
}
return cache_->usage();
......@@ -117,7 +117,7 @@ template <typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheCapacity() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return 0;
}
return cache_->capacity();
......@@ -127,7 +127,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::SetCapacity(int64_t capacity) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->set_capacity(capacity);
......
......@@ -44,14 +44,14 @@ DefaultDeletedDocsFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::Del
int del_fd = open(del_file_path.c_str(), O_RDONLY, 00664);
if (del_fd == -1) {
std::string err_msg = "Failed to open file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t num_bytes;
if (::read(del_fd, &num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
......@@ -61,7 +61,7 @@ DefaultDeletedDocsFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::Del
if (::read(del_fd, deleted_docs_list.data(), num_bytes) == -1) {
std::string err_msg = "Failed to read from file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
......@@ -69,7 +69,7 @@ DefaultDeletedDocsFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::Del
if (::close(del_fd) == -1) {
std::string err_msg = "Failed to close file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
}
......@@ -92,7 +92,7 @@ DefaultDeletedDocsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segme
int del_fd = open(temp_path.c_str(), O_RDWR | O_CREAT, 00664);
if (del_fd == -1) {
std::string err_msg = "Failed to open file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
......@@ -100,7 +100,7 @@ DefaultDeletedDocsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segme
if (exists) {
if (::read(del_fd, &old_num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
} else {
......@@ -114,12 +114,12 @@ DefaultDeletedDocsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segme
int off = lseek(del_fd, 0, SEEK_SET);
if (off == -1) {
std::string err_msg = "Failed to seek file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::write(del_fd, &new_num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to write to file" + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
......@@ -127,18 +127,18 @@ DefaultDeletedDocsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segme
off = lseek(del_fd, 0, SEEK_END);
if (off == -1) {
std::string err_msg = "Failed to seek file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::write(del_fd, deleted_docs_list.data(), sizeof(segment::offset_t) * deleted_docs->GetSize()) == -1) {
std::string err_msg = "Failed to write to file" + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(del_fd) == -1) {
std::string err_msg = "Failed to close file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
......@@ -156,14 +156,14 @@ DefaultDeletedDocsFormat::readSize(const storage::FSHandlerPtr& fs_ptr, size_t&
int del_fd = open(del_file_path.c_str(), O_RDONLY, 00664);
if (del_fd == -1) {
std::string err_msg = "Failed to open file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t num_bytes;
if (::read(del_fd, &num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
......@@ -171,7 +171,7 @@ DefaultDeletedDocsFormat::readSize(const storage::FSHandlerPtr& fs_ptr, size_t&
if (::close(del_fd) == -1) {
std::string err_msg = "Failed to close file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
}
......
......@@ -40,7 +40,7 @@ DefaultIdBloomFilterFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::I
if (bloom_filter == nullptr) {
std::string err_msg =
"Failed to read bloom filter from file: " + bloom_filter_file_path + ". " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_UNEXPECTED_ERROR, err_msg);
}
id_bloom_filter_ptr = std::make_shared<segment::IdBloomFilter>(bloom_filter);
......@@ -56,7 +56,7 @@ DefaultIdBloomFilterFormat::write(const storage::FSHandlerPtr& fs_ptr,
if (scaling_bloom_flush(id_bloom_filter_ptr->GetBloomFilter()) == -1) {
std::string err_msg =
"Failed to write bloom filter to file: " + bloom_filter_file_path + ". " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_UNEXPECTED_ERROR, err_msg);
}
}
......@@ -71,7 +71,7 @@ DefaultIdBloomFilterFormat::create(const storage::FSHandlerPtr& fs_ptr,
if (bloom_filter == nullptr) {
std::string err_msg =
"Failed to read bloom filter from file: " + bloom_filter_file_path + ". " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_UNEXPECTED_ERROR, err_msg);
}
id_bloom_filter_ptr = std::make_shared<segment::IdBloomFilter>(bloom_filter);
......
......@@ -37,13 +37,13 @@ DefaultVectorIndexFormat::read_internal(const storage::FSHandlerPtr& fs_ptr, con
recorder.RecordSection("Start");
if (!fs_ptr->reader_ptr_->open(path)) {
ENGINE_LOG_ERROR << "Fail to open vector index: " << path;
LOG_ENGINE_ERROR_ << "Fail to open vector index: " << path;
return nullptr;
}
int64_t length = fs_ptr->reader_ptr_->length();
if (length <= 0) {
ENGINE_LOG_ERROR << "Invalid vector index length: " << path;
LOG_ENGINE_ERROR_ << "Invalid vector index length: " << path;
return nullptr;
}
......@@ -55,7 +55,7 @@ DefaultVectorIndexFormat::read_internal(const storage::FSHandlerPtr& fs_ptr, con
rp += sizeof(current_type);
fs_ptr->reader_ptr_->seekg(rp);
ENGINE_LOG_DEBUG << "Start to read_index(" << path << ") length: " << length << " bytes";
LOG_ENGINE_DEBUG_ << "Start to read_index(" << path << ") length: " << length << " bytes";
while (rp < length) {
size_t meta_length;
fs_ptr->reader_ptr_->read(&meta_length, sizeof(meta_length));
......@@ -85,7 +85,7 @@ DefaultVectorIndexFormat::read_internal(const storage::FSHandlerPtr& fs_ptr, con
double span = recorder.RecordSection("End");
double rate = length * 1000000.0 / span / 1024 / 1024;
ENGINE_LOG_DEBUG << "read_index(" << path << ") rate " << rate << "MB/s";
LOG_ENGINE_DEBUG_ << "read_index(" << path << ") rate " << rate << "MB/s";
knowhere::VecIndexFactory& vec_index_factory = knowhere::VecIndexFactory::GetInstance();
auto index =
......@@ -94,7 +94,7 @@ DefaultVectorIndexFormat::read_internal(const storage::FSHandlerPtr& fs_ptr, con
index->Load(load_data_list);
index->SetIndexSize(length);
} else {
ENGINE_LOG_ERROR << "Fail to create vector index: " << path;
LOG_ENGINE_ERROR_ << "Fail to create vector index: " << path;
}
return index;
......@@ -108,7 +108,7 @@ DefaultVectorIndexFormat::read(const storage::FSHandlerPtr& fs_ptr, const std::s
std::string dir_path = fs_ptr->operation_ptr_->GetDirectory();
if (!boost::filesystem::is_directory(dir_path)) {
std::string err_msg = "Directory: " + dir_path + "does not exist";
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_INVALID_ARGUMENT, err_msg);
}
......@@ -132,7 +132,7 @@ DefaultVectorIndexFormat::write(const storage::FSHandlerPtr& fs_ptr, const std::
recorder.RecordSection("Start");
if (!fs_ptr->writer_ptr_->open(location)) {
ENGINE_LOG_ERROR << "Fail to open vector index: " << location;
LOG_ENGINE_ERROR_ << "Fail to open vector index: " << location;
return;
}
......@@ -153,7 +153,7 @@ DefaultVectorIndexFormat::write(const storage::FSHandlerPtr& fs_ptr, const std::
double span = recorder.RecordSection("End");
double rate = fs_ptr->writer_ptr_->length() * 1000000.0 / span / 1024 / 1024;
ENGINE_LOG_DEBUG << "write_index(" << location << ") rate " << rate << "MB/s";
LOG_ENGINE_DEBUG_ << "write_index(" << location << ") rate " << rate << "MB/s";
}
} // namespace codec
......
......@@ -36,14 +36,14 @@ DefaultVectorsFormat::read_vectors_internal(const std::string& file_path, off_t
int rv_fd = open(file_path.c_str(), O_RDONLY, 00664);
if (rv_fd == -1) {
std::string err_msg = "Failed to open file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t num_bytes;
if (::read(rv_fd, &num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
......@@ -53,20 +53,20 @@ DefaultVectorsFormat::read_vectors_internal(const std::string& file_path, off_t
int off = lseek(rv_fd, offset, SEEK_SET);
if (off == -1) {
std::string err_msg = "Failed to seek file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
raw_vectors.resize(num / sizeof(uint8_t));
if (::read(rv_fd, raw_vectors.data(), num) == -1) {
std::string err_msg = "Failed to read from file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(rv_fd) == -1) {
std::string err_msg = "Failed to close file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
}
......@@ -76,27 +76,27 @@ DefaultVectorsFormat::read_uids_internal(const std::string& file_path, std::vect
int uid_fd = open(file_path.c_str(), O_RDONLY, 00664);
if (uid_fd == -1) {
std::string err_msg = "Failed to open file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t num_bytes;
if (::read(uid_fd, &num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
uids.resize(num_bytes / sizeof(segment::doc_id_t));
if (::read(uid_fd, uids.data(), num_bytes) == -1) {
std::string err_msg = "Failed to read from file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(uid_fd) == -1) {
std::string err_msg = "Failed to close file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
}
......@@ -108,7 +108,7 @@ DefaultVectorsFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::Vectors
std::string dir_path = fs_ptr->operation_ptr_->GetDirectory();
if (!boost::filesystem::is_directory(dir_path)) {
std::string err_msg = "Directory: " + dir_path + "does not exist";
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_INVALID_ARGUMENT, err_msg);
}
......@@ -147,24 +147,24 @@ DefaultVectorsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segment::
int rv_fd = open(rv_file_path.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 00664);
if (rv_fd == -1) {
std::string err_msg = "Failed to open file: " + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t rv_num_bytes = vectors->GetData().size() * sizeof(uint8_t);
if (::write(rv_fd, &rv_num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to write to file: " + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::write(rv_fd, vectors->GetData().data(), rv_num_bytes) == -1) {
std::string err_msg = "Failed to write to file: " + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(rv_fd) == -1) {
std::string err_msg = "Failed to close file: " + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
......@@ -173,23 +173,23 @@ DefaultVectorsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segment::
int uid_fd = open(uid_file_path.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 00664);
if (uid_fd == -1) {
std::string err_msg = "Failed to open file: " + uid_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t uid_num_bytes = vectors->GetUids().size() * sizeof(segment::doc_id_t);
if (::write(uid_fd, &uid_num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to write to file" + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::write(uid_fd, vectors->GetUids().data(), uid_num_bytes) == -1) {
std::string err_msg = "Failed to write to file" + uid_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(uid_fd) == -1) {
std::string err_msg = "Failed to close file: " + uid_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
......@@ -203,7 +203,7 @@ DefaultVectorsFormat::read_uids(const storage::FSHandlerPtr& fs_ptr, std::vector
std::string dir_path = fs_ptr->operation_ptr_->GetDirectory();
if (!boost::filesystem::is_directory(dir_path)) {
std::string err_msg = "Directory: " + dir_path + "does not exist";
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_INVALID_ARGUMENT, err_msg);
}
......@@ -228,7 +228,7 @@ DefaultVectorsFormat::read_vectors(const storage::FSHandlerPtr& fs_ptr, off_t of
std::string dir_path = fs_ptr->operation_ptr_->GetDirectory();
if (!boost::filesystem::is_directory(dir_path)) {
std::string err_msg = "Directory: " + dir_path + "does not exist";
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_INVALID_ARGUMENT, err_msg);
}
......
......@@ -668,7 +668,7 @@ Config::CheckConfigVersion(const std::string& value) {
if (exist_error) {
std::string msg = "Invalid config version: " + value +
". Expected config version: " + milvus_config_version_map.at(MILVUS_VERSION);
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
}
......
......@@ -187,18 +187,18 @@ ConfigNode::ClearSequences() {
void
ConfigNode::PrintAll(const std::string& prefix) const {
for (auto& elem : config_) {
SERVER_LOG_INFO << prefix << elem.first + ": " << elem.second;
LOG_SERVER_INFO_ << prefix << elem.first + ": " << elem.second;
}
for (auto& elem : sequences_) {
SERVER_LOG_INFO << prefix << elem.first << ": ";
LOG_SERVER_INFO_ << prefix << elem.first << ": ";
for (auto& str : elem.second) {
SERVER_LOG_INFO << prefix << " - " << str;
LOG_SERVER_INFO_ << prefix << " - " << str;
}
}
for (auto& elem : children_) {
SERVER_LOG_INFO << prefix << elem.first << ": ";
LOG_SERVER_INFO_ << prefix << elem.first << ": ";
elem.second.PrintAll(prefix + " ");
}
}
......
......@@ -30,7 +30,7 @@ YamlConfigMgr::LoadConfigFile(const std::string& filename) {
void
YamlConfigMgr::Print() const {
SERVER_LOG_INFO << "System config content:";
LOG_SERVER_INFO_ << "System config content:";
config_.PrintAll();
}
......
This diff is collapsed.
......@@ -100,7 +100,7 @@ Status
SafeIDGenerator::NextIDNumbers(size_t n, IDNumbers& ids) {
if (n <= 0 || n > MAX_IDS_PER_MICRO) {
std::string msg = "Invalid ID number: " + std::to_string(n);
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(SERVER_UNEXPECTED_ERROR, msg);
}
......
......@@ -94,8 +94,8 @@ OngoingFileChecker::MarkOngoingFileNoLock(const meta::SegmentSchema& table_file)
}
}
ENGINE_LOG_DEBUG << "Mark ongoing file:" << table_file.file_id_
<< " refcount:" << ongoing_files_[table_file.collection_id_][table_file.file_id_];
LOG_ENGINE_DEBUG_ << "Mark ongoing file:" << table_file.file_id_
<< " refcount:" << ongoing_files_[table_file.collection_id_][table_file.file_id_];
return Status::OK();
}
......@@ -112,7 +112,7 @@ OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::SegmentSchema& table_fil
if (it_file != iter->second.end()) {
it_file->second--;
ENGINE_LOG_DEBUG << "Unmark ongoing file:" << table_file.file_id_ << " refcount:" << it_file->second;
LOG_ENGINE_DEBUG_ << "Unmark ongoing file:" << table_file.file_id_ << " refcount:" << it_file->second;
if (it_file->second <= 0) {
iter->second.erase(table_file.file_id_);
......
......@@ -54,11 +54,11 @@ ArchiveConf::ParseCritirias(const std::string& criterias) {
std::vector<std::string> kv;
boost::algorithm::split(kv, token, boost::is_any_of(":"));
if (kv.size() != 2) {
ENGINE_LOG_WARNING << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
LOG_ENGINE_WARNING_ << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
continue;
}
if (kv[0] != "disk" && kv[0] != "days") {
ENGINE_LOG_WARNING << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
LOG_ENGINE_WARNING_ << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
continue;
}
try {
......@@ -68,11 +68,11 @@ ArchiveConf::ParseCritirias(const std::string& criterias) {
criterias_[kv[0]] = value;
} catch (std::out_of_range&) {
std::string msg = "Out of range: '" + kv[1] + "'";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw InvalidArgumentException(msg);
} catch (...) {
std::string msg = "Invalid argument: '" + kv[1] + "'";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw InvalidArgumentException(msg);
}
}
......
......@@ -84,7 +84,7 @@ CreateCollectionPath(const DBMetaOptions& options, const std::string& collection
std::string table_path = db_path + TABLES_FOLDER + collection_id;
auto status = server::CommonUtil::CreateDirectory(table_path);
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
......@@ -93,7 +93,7 @@ CreateCollectionPath(const DBMetaOptions& options, const std::string& collection
status = server::CommonUtil::CreateDirectory(table_path);
fiu_do_on("CreateCollectionPath.creat_slave_path", status = Status(DB_INVALID_PATH, ""));
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
}
......@@ -110,10 +110,10 @@ DeleteCollectionPath(const DBMetaOptions& options, const std::string& collection
std::string table_path = path + TABLES_FOLDER + collection_id;
if (force) {
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove collection folder: " << table_path;
LOG_ENGINE_DEBUG_ << "Remove collection folder: " << table_path;
} else if (boost::filesystem::exists(table_path) && boost::filesystem::is_empty(table_path)) {
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove collection folder: " << table_path;
LOG_ENGINE_DEBUG_ << "Remove collection folder: " << table_path;
}
}
......@@ -141,7 +141,7 @@ CreateCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& tabl
auto status = server::CommonUtil::CreateDirectory(parent_path);
fiu_do_on("CreateCollectionFilePath.fail_create", status = Status(DB_INVALID_PATH, ""));
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
......@@ -181,7 +181,7 @@ GetCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_f
std::string msg = "Collection file doesn't exist: " + file_path;
if (table_file.file_size_ > 0) { // no need to pop error for empty file
ENGINE_LOG_ERROR << msg << " in path: " << options.path_ << " for collection: " << table_file.collection_id_;
LOG_ENGINE_ERROR_ << msg << " in path: " << options.path_ << " for collection: " << table_file.collection_id_;
}
return Status(DB_ERROR, msg);
......
......@@ -22,11 +22,11 @@ ExecutionEnginePtr
EngineFactory::Build(uint16_t dimension, const std::string& location, EngineType index_type, MetricType metric_type,
const milvus::json& index_params) {
if (index_type == EngineType::INVALID) {
ENGINE_LOG_ERROR << "Unsupported engine type";
LOG_ENGINE_ERROR_ << "Unsupported engine type";
return nullptr;
}
ENGINE_LOG_DEBUG << "EngineFactory index type: " << (int)index_type;
LOG_ENGINE_DEBUG_ << "EngineFactory index type: " << (int)index_type;
ExecutionEnginePtr execution_engine_ptr =
std::make_shared<ExecutionEngineImpl>(dimension, location, index_type, metric_type, index_params);
......
......@@ -131,7 +131,7 @@ ExecutionEngineImpl::ExecutionEngineImpl(uint16_t dimension, const std::string&
conf[knowhere::meta::DEVICEID] = gpu_num_;
conf[knowhere::meta::DIM] = dimension;
MappingMetricType(metric_type, conf);
ENGINE_LOG_DEBUG << "Index params: " << conf.dump();
LOG_ENGINE_DEBUG_ << "Index params: " << conf.dump();
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
if (!adapter->CheckTrain(conf, index_->index_mode())) {
throw Exception(DB_ERROR, "Illegal index params");
......@@ -223,13 +223,13 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
break;
}
default: {
ENGINE_LOG_ERROR << "Unsupported index type " << (int)type;
LOG_ENGINE_ERROR_ << "Unsupported index type " << (int)type;
return nullptr;
}
}
if (index == nullptr) {
std::string err_msg = "Invalid index type " + std::to_string((int)type) + " mod " + std::to_string((int)mode);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(DB_ERROR, err_msg);
}
return index;
......@@ -240,7 +240,7 @@ ExecutionEngineImpl::HybridLoad() const {
#ifdef MILVUS_GPU_VERSION
auto hybrid_index = std::dynamic_pointer_cast<knowhere::IVFSQHybrid>(index_);
if (hybrid_index == nullptr) {
ENGINE_LOG_WARNING << "HybridLoad only support with IVFSQHybrid";
LOG_ENGINE_WARNING_ << "HybridLoad only support with IVFSQHybrid";
return;
}
......@@ -250,7 +250,7 @@ ExecutionEngineImpl::HybridLoad() const {
std::vector<int64_t> gpus;
Status s = config.GetGpuResourceConfigSearchResources(gpus);
if (!s.ok()) {
ENGINE_LOG_ERROR << s.message();
LOG_ENGINE_ERROR_ << s.message();
return;
}
......@@ -289,9 +289,9 @@ ExecutionEngineImpl::HybridLoad() const {
milvus::json quantizer_conf{{knowhere::meta::DEVICEID, best_device_id}, {"mode", 1}};
auto quantizer = hybrid_index->LoadQuantizer(quantizer_conf);
ENGINE_LOG_DEBUG << "Quantizer params: " << quantizer_conf.dump();
LOG_ENGINE_DEBUG_ << "Quantizer params: " << quantizer_conf.dump();
if (quantizer == nullptr) {
ENGINE_LOG_ERROR << "quantizer is nullptr";
LOG_ENGINE_ERROR_ << "quantizer is nullptr";
}
hybrid_index->SetQuantizer(quantizer);
auto cache_quantizer = std::make_shared<CachedQuantizer>(quantizer);
......@@ -328,7 +328,7 @@ ExecutionEngineImpl::AddWithIds(int64_t n, const uint8_t* xdata, const int64_t*
size_t
ExecutionEngineImpl::Count() const {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, return count 0";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, return count 0";
return 0;
}
return index_->Count();
......@@ -337,7 +337,7 @@ ExecutionEngineImpl::Count() const {
size_t
ExecutionEngineImpl::Dimension() const {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, return dimension " << dim_;
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, return dimension " << dim_;
return dim_;
}
return index_->Dim();
......@@ -346,7 +346,7 @@ ExecutionEngineImpl::Dimension() const {
size_t
ExecutionEngineImpl::Size() const {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, return size 0";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, return size 0";
return 0;
}
return index_->Size();
......@@ -363,7 +363,7 @@ ExecutionEngineImpl::Serialize() {
// here we reset index size by file size,
// since some index type(such as SQ8) data size become smaller after serialized
index_->SetIndexSize(server::CommonUtil::GetFileSize(location_));
ENGINE_LOG_DEBUG << "Finish serialize index file: " << location_ << " size: " << index_->Size();
LOG_ENGINE_DEBUG_ << "Finish serialize index file: " << location_ << " size: " << index_->Size();
if (index_->Size() == 0) {
std::string msg = "Failed to serialize file: " + location_ + " reason: out of disk space or memory";
......@@ -394,7 +394,7 @@ ExecutionEngineImpl::Load(bool to_cache) {
milvus::json conf{{knowhere::meta::DEVICEID, gpu_num_}, {knowhere::meta::DIM, dim_}};
MappingMetricType(metric_type_, conf);
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
ENGINE_LOG_DEBUG << "Index params: " << conf.dump();
LOG_ENGINE_DEBUG_ << "Index params: " << conf.dump();
if (!adapter->CheckTrain(conf, index_->index_mode())) {
throw Exception(DB_ERROR, "Illegal index params");
}
......@@ -402,7 +402,7 @@ ExecutionEngineImpl::Load(bool to_cache) {
auto status = segment_reader_ptr->Load();
if (!status.ok()) {
std::string msg = "Failed to load segment from " + location_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
}
......@@ -413,7 +413,7 @@ ExecutionEngineImpl::Load(bool to_cache) {
auto vectors_uids = vectors->GetUids();
index_->SetUids(vectors_uids);
ENGINE_LOG_DEBUG << "set uids " << index_->GetUids().size() << " for index " << location_;
LOG_ENGINE_DEBUG_ << "set uids " << index_->GetUids().size() << " for index " << location_;
auto vectors_data = vectors->GetData();
......@@ -438,7 +438,7 @@ ExecutionEngineImpl::Load(bool to_cache) {
bin_bf_index->SetBlacklist(concurrent_bitset_ptr);
}
ENGINE_LOG_DEBUG << "Finished loading raw data from segment " << segment_dir;
LOG_ENGINE_DEBUG_ << "Finished loading raw data from segment " << segment_dir;
} else {
try {
segment::SegmentPtr segment_ptr;
......@@ -448,14 +448,14 @@ ExecutionEngineImpl::Load(bool to_cache) {
if (index_ == nullptr) {
std::string msg = "Failed to load index from " + location_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
} else {
segment::DeletedDocsPtr deleted_docs_ptr;
auto status = segment_reader_ptr->LoadDeletedDocs(deleted_docs_ptr);
if (!status.ok()) {
std::string msg = "Failed to load deleted docs from " + location_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
}
auto& deleted_docs = deleted_docs_ptr->GetDeletedDocs();
......@@ -473,12 +473,12 @@ ExecutionEngineImpl::Load(bool to_cache) {
std::vector<segment::doc_id_t> uids;
segment_reader_ptr->LoadUids(uids);
index_->SetUids(uids);
ENGINE_LOG_DEBUG << "set uids " << index_->GetUids().size() << " for index " << location_;
LOG_ENGINE_DEBUG_ << "set uids " << index_->GetUids().size() << " for index " << location_;
ENGINE_LOG_DEBUG << "Finished loading index file from segment " << segment_dir;
LOG_ENGINE_DEBUG_ << "Finished loading index file from segment " << segment_dir;
}
} catch (std::exception& e) {
ENGINE_LOG_ERROR << e.what();
LOG_ENGINE_ERROR_ << e.what();
return Status(DB_ERROR, e.what());
}
}
......@@ -552,7 +552,7 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) {
index_ = index;
} else {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to gpu";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, failed to copy to gpu";
return Status(DB_ERROR, "index is null");
}
......@@ -561,14 +561,14 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) {
* Add lock here to avoid multiple INDEX are copied to one GPU card at same time.
* And reserve space to avoid GPU out of memory issue.
*/
ENGINE_LOG_DEBUG << "CPU to GPU" << device_id << " start";
LOG_ENGINE_DEBUG_ << "CPU to GPU" << device_id << " start";
auto gpu_cache_mgr = cache::GpuCacheMgr::GetInstance(device_id);
// gpu_cache_mgr->Reserve(index_->Size());
index_ = knowhere::cloner::CopyCpuToGpu(index_, device_id, knowhere::Config());
// gpu_cache_mgr->InsertItem(location_, std::static_pointer_cast<cache::DataObj>(index_));
ENGINE_LOG_DEBUG << "CPU to GPU" << device_id << " finished";
LOG_ENGINE_DEBUG_ << "CPU to GPU" << device_id << " finished";
} catch (std::exception& e) {
ENGINE_LOG_ERROR << e.what();
LOG_ENGINE_ERROR_ << e.what();
return Status(DB_ERROR, e.what());
}
}
......@@ -599,15 +599,15 @@ ExecutionEngineImpl::CopyToCpu() {
index_ = index;
} else {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to cpu";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, failed to copy to cpu";
return Status(DB_ERROR, "index is null");
}
try {
index_ = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
ENGINE_LOG_DEBUG << "GPU to CPU";
LOG_ENGINE_DEBUG_ << "GPU to CPU";
} catch (std::exception& e) {
ENGINE_LOG_ERROR << e.what();
LOG_ENGINE_ERROR_ << e.what();
return Status(DB_ERROR, e.what());
}
}
......@@ -617,19 +617,19 @@ ExecutionEngineImpl::CopyToCpu() {
}
return Status::OK();
#else
ENGINE_LOG_ERROR << "Calling ExecutionEngineImpl::CopyToCpu when using CPU version";
LOG_ENGINE_ERROR_ << "Calling ExecutionEngineImpl::CopyToCpu when using CPU version";
return Status(DB_ERROR, "Calling ExecutionEngineImpl::CopyToCpu when using CPU version");
#endif
}
ExecutionEnginePtr
ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_type) {
ENGINE_LOG_DEBUG << "Build index file: " << location << " from: " << location_;
LOG_ENGINE_DEBUG_ << "Build index file: " << location << " from: " << location_;
auto from_index = std::dynamic_pointer_cast<knowhere::IDMAP>(index_);
auto bin_from_index = std::dynamic_pointer_cast<knowhere::BinaryIDMAP>(index_);
if (from_index == nullptr && bin_from_index == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: from_index is null, failed to build index";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: from_index is null, failed to build index";
return nullptr;
}
......@@ -643,12 +643,12 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
conf[knowhere::meta::ROWS] = Count();
conf[knowhere::meta::DEVICEID] = gpu_num_;
MappingMetricType(metric_type_, conf);
ENGINE_LOG_DEBUG << "Index params: " << conf.dump();
LOG_ENGINE_DEBUG_ << "Index params: " << conf.dump();
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(to_index->index_type());
if (!adapter->CheckTrain(conf, to_index->index_mode())) {
throw Exception(DB_ERROR, "Illegal index params");
}
ENGINE_LOG_DEBUG << "Index config: " << conf.dump();
LOG_ENGINE_DEBUG_ << "Index config: " << conf.dump();
std::vector<segment::doc_id_t> uids;
faiss::ConcurrentBitsetPtr blacklist;
......@@ -675,13 +675,13 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
#endif
to_index->SetUids(uids);
ENGINE_LOG_DEBUG << "Set " << to_index->GetUids().size() << "uids for " << location;
LOG_ENGINE_DEBUG_ << "Set " << to_index->GetUids().size() << "uids for " << location;
if (blacklist != nullptr) {
to_index->SetBlacklist(blacklist);
ENGINE_LOG_DEBUG << "Set blacklist for index " << location;
LOG_ENGINE_DEBUG_ << "Set blacklist for index " << location;
}
ENGINE_LOG_DEBUG << "Finish build index: " << location;
LOG_ENGINE_DEBUG_ << "Finish build index: " << location;
return std::make_shared<ExecutionEngineImpl>(to_index, location, engine_type, metric_type_, index_params_);
}
......@@ -766,7 +766,7 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu
TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search float", "search", 0));
if (index_ == nullptr) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
return Status(DB_ERROR, "index is null");
}
......@@ -774,7 +774,7 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu
conf[knowhere::meta::TOPK] = k;
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Illegal search params", "search", 0);
throw Exception(DB_ERROR, "Illegal search params");
}
......@@ -787,8 +787,8 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu
auto result = index_->Query(dataset, conf);
rc.RecordSection("query done");
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
MapAndCopyResult(result, index_->GetUids(), n, k, distances, labels);
rc.RecordSection("map uids " + std::to_string(n * k));
......@@ -805,7 +805,7 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil
TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search uint8", "search", 0));
if (index_ == nullptr) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
return Status(DB_ERROR, "index is null");
}
......@@ -813,7 +813,7 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil
conf[knowhere::meta::TOPK] = k;
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Illegal search params", "search", 0);
throw Exception(DB_ERROR, "Illegal search params");
}
......@@ -826,8 +826,8 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil
auto result = index_->Query(dataset, conf);
rc.RecordSection("query done");
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
MapAndCopyResult(result, index_->GetUids(), n, k, distances, labels);
rc.RecordSection("map uids " + std::to_string(n * k));
......@@ -844,7 +844,7 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector<int64_t>& ids, int64_t
TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search vector of ids", "search", 0));
if (index_ == nullptr) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
return Status(DB_ERROR, "index is null");
}
......@@ -852,7 +852,7 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector<int64_t>& ids, int64_t
conf[knowhere::meta::TOPK] = k;
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Illegal search params", "search", 0);
throw Exception(DB_ERROR, "Illegal search params");
}
......@@ -906,8 +906,8 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector<int64_t>& ids, int64_t
auto result = index_->QueryById(dataset, conf);
rc.RecordSection("query by id done");
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
MapAndCopyResult(result, uids, offsets.size(), k, distances, labels);
rc.RecordSection("map uids " + std::to_string(offsets.size() * k));
}
......@@ -922,7 +922,7 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector<int64_t>& ids, int64_t
Status
ExecutionEngineImpl::GetVectorByID(const int64_t& id, float* vector, bool hybrid) {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, failed to search";
return Status(DB_ERROR, "index is null");
}
......@@ -947,11 +947,11 @@ ExecutionEngineImpl::GetVectorByID(const int64_t& id, float* vector, bool hybrid
Status
ExecutionEngineImpl::GetVectorByID(const int64_t& id, uint8_t* vector, bool hybrid) {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, failed to search";
return Status(DB_ERROR, "index is null");
}
ENGINE_LOG_DEBUG << "Get binary vector by id: " << id;
LOG_ENGINE_DEBUG_ << "Get binary vector by id: " << id;
if (hybrid) {
HybridLoad();
......
......@@ -36,7 +36,7 @@ MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length,
const float* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) {
flushed_tables.clear();
if (GetCurrentMem() > options_.insert_buffer_size_) {
ENGINE_LOG_DEBUG << "Insert buffer size exceeds limit. Performing force flush";
LOG_ENGINE_DEBUG_ << "Insert buffer size exceeds limit. Performing force flush";
// TODO(zhiru): Don't apply delete here in order to avoid possible concurrency issues with Merge
auto status = Flush(flushed_tables, false);
if (!status.ok()) {
......@@ -62,12 +62,12 @@ MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length,
const uint8_t* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) {
flushed_tables.clear();
if (GetCurrentMem() > options_.insert_buffer_size_) {
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] ", "insert", 0)
<< "Insert buffer size exceeds limit. Performing force flush";
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] ", "insert", 0)
<< "Insert buffer size exceeds limit. Performing force flush";
// TODO(zhiru): Don't apply delete here in order to avoid possible concurrency issues with Merge
auto status = Flush(flushed_tables, false);
if (!status.ok()) {
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] ", "insert", 0) << "Flush fail: " << status.message();
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] ", "insert", 0) << "Flush fail: " << status.message();
return status;
}
}
......@@ -143,13 +143,13 @@ MemManagerImpl::Flush(const std::string& collection_id, bool apply_delete) {
std::unique_lock<std::mutex> lock(serialization_mtx_);
auto max_lsn = GetMaxLSN(temp_immutable_list);
for (auto& mem : temp_immutable_list) {
ENGINE_LOG_DEBUG << "Flushing collection: " << mem->GetTableId();
LOG_ENGINE_DEBUG_ << "Flushing collection: " << mem->GetTableId();
auto status = mem->Serialize(max_lsn, apply_delete);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Flush collection " << mem->GetTableId() << " failed";
LOG_ENGINE_ERROR_ << "Flush collection " << mem->GetTableId() << " failed";
return status;
}
ENGINE_LOG_DEBUG << "Flushed collection: " << mem->GetTableId();
LOG_ENGINE_DEBUG_ << "Flushed collection: " << mem->GetTableId();
}
return Status::OK();
......@@ -169,14 +169,14 @@ MemManagerImpl::Flush(std::set<std::string>& table_ids, bool apply_delete) {
table_ids.clear();
auto max_lsn = GetMaxLSN(temp_immutable_list);
for (auto& mem : temp_immutable_list) {
ENGINE_LOG_DEBUG << "Flushing collection: " << mem->GetTableId();
LOG_ENGINE_DEBUG_ << "Flushing collection: " << mem->GetTableId();
auto status = mem->Serialize(max_lsn, apply_delete);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Flush collection " << mem->GetTableId() << " failed";
LOG_ENGINE_ERROR_ << "Flush collection " << mem->GetTableId() << " failed";
return status;
}
table_ids.insert(mem->GetTableId());
ENGINE_LOG_DEBUG << "Flushed collection: " << mem->GetTableId();
LOG_ENGINE_DEBUG_ << "Flushed collection: " << mem->GetTableId();
}
meta_->SetGlobalLastLSN(max_lsn);
......@@ -194,7 +194,7 @@ MemManagerImpl::ToImmutable(const std::string& collection_id) {
mem_id_map_.erase(memIt);
}
// std::string err_msg = "Could not find collection = " + collection_id + " to flush";
// ENGINE_LOG_ERROR << err_msg;
// LOG_ENGINE_ERROR_ << err_msg;
// return Status(DB_NOT_FOUND, err_msg);
}
......
......@@ -53,7 +53,7 @@ MemTable::Add(const VectorSourcePtr& source) {
if (!status.ok()) {
std::string err_msg = "Insert failed: " + status.ToString();
ENGINE_LOG_ERROR << LogOut("[%s][%ld] ", "insert", 0) << err_msg;
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] ", "insert", 0) << err_msg;
return Status(DB_ERROR, err_msg);
}
}
......@@ -113,7 +113,7 @@ MemTable::Serialize(uint64_t wal_lsn, bool apply_delete) {
return status;
}
ENGINE_LOG_DEBUG << "Flushed segment " << (*mem_table_file)->GetSegmentId();
LOG_ENGINE_DEBUG_ << "Flushed segment " << (*mem_table_file)->GetSegmentId();
{
std::lock_guard<std::mutex> lock(mutex_);
......@@ -125,13 +125,13 @@ MemTable::Serialize(uint64_t wal_lsn, bool apply_delete) {
auto status = meta_->UpdateCollectionFlushLSN(collection_id_, wal_lsn);
if (!status.ok()) {
std::string err_msg = "Failed to write flush lsn to meta: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
ENGINE_LOG_DEBUG << "Finished flushing for collection " << collection_id_ << " in " << diff.count() << " s";
LOG_ENGINE_DEBUG_ << "Finished flushing for collection " << collection_id_ << " in " << diff.count() << " s";
return Status::OK();
}
......@@ -173,7 +173,7 @@ MemTable::ApplyDeletes() {
// Serialize segment's deletedDoc TODO(zhiru): append directly to previous file for now, may have duplicates
// Serialize bloom filter
ENGINE_LOG_DEBUG << "Applying " << doc_ids_to_delete_.size() << " deletes in collection: " << collection_id_;
LOG_ENGINE_DEBUG_ << "Applying " << doc_ids_to_delete_.size() << " deletes in collection: " << collection_id_;
auto start_total = std::chrono::high_resolution_clock::now();
......@@ -185,7 +185,7 @@ MemTable::ApplyDeletes() {
auto status = meta_->FilesByType(collection_id_, file_types, table_files);
if (!status.ok()) {
std::string err_msg = "Failed to apply deletes: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
......@@ -220,13 +220,14 @@ MemTable::ApplyDeletes() {
auto time0 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff0 = time0 - start_total;
ENGINE_LOG_DEBUG << "Found " << ids_to_check_map.size() << " segment to apply deletes in " << diff0.count() << " s";
LOG_ENGINE_DEBUG_ << "Found " << ids_to_check_map.size() << " segment to apply deletes in " << diff0.count()
<< " s";
meta::SegmentsSchema table_files_to_update;
for (auto& kv : ids_to_check_map) {
auto& table_file = table_files[kv.first];
ENGINE_LOG_DEBUG << "Applying deletes in segment: " << table_file.segment_id_;
LOG_ENGINE_DEBUG_ << "Applying deletes in segment: " << table_file.segment_id_;
auto time1 = std::chrono::high_resolution_clock::now();
......@@ -273,13 +274,13 @@ MemTable::ApplyDeletes() {
auto time2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff1 = time2 - time1;
ENGINE_LOG_DEBUG << "Loading uids and deleted docs took " << diff1.count() << " s";
LOG_ENGINE_DEBUG_ << "Loading uids and deleted docs took " << diff1.count() << " s";
std::sort(ids_to_check.begin(), ids_to_check.end());
auto time3 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff2 = time3 - time2;
ENGINE_LOG_DEBUG << "Sorting " << ids_to_check.size() << " ids took " << diff2.count() << " s";
LOG_ENGINE_DEBUG_ << "Sorting " << ids_to_check.size() << " ids took " << diff2.count() << " s";
size_t delete_count = 0;
auto find_diff = std::chrono::duration<double>::zero();
......@@ -315,9 +316,9 @@ MemTable::ApplyDeletes() {
}
}
ENGINE_LOG_DEBUG << "Finding " << ids_to_check.size() << " uids in " << uids.size() << " uids took "
<< find_diff.count() << " s in total";
ENGINE_LOG_DEBUG << "Setting deleted docs and bloom filter took " << set_diff.count() << " s in total";
LOG_ENGINE_DEBUG_ << "Finding " << ids_to_check.size() << " uids in " << uids.size() << " uids took "
<< find_diff.count() << " s in total";
LOG_ENGINE_DEBUG_ << "Setting deleted docs and bloom filter took " << set_diff.count() << " s in total";
auto time4 = std::chrono::high_resolution_clock::now();
......@@ -336,9 +337,9 @@ MemTable::ApplyDeletes() {
auto time5 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff4 = time5 - time4;
ENGINE_LOG_DEBUG << "Appended " << deleted_docs->GetSize()
<< " offsets to deleted docs in segment: " << table_file.segment_id_ << " in " << diff4.count()
<< " s";
LOG_ENGINE_DEBUG_ << "Appended " << deleted_docs->GetSize()
<< " offsets to deleted docs in segment: " << table_file.segment_id_ << " in "
<< diff4.count() << " s";
// start = std::chrono::high_resolution_clock::now();
......@@ -348,8 +349,8 @@ MemTable::ApplyDeletes() {
}
auto time6 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff5 = time6 - time5;
ENGINE_LOG_DEBUG << "Updated bloom filter in segment: " << table_file.segment_id_ << " in " << diff5.count()
<< " s";
LOG_ENGINE_DEBUG_ << "Updated bloom filter in segment: " << table_file.segment_id_ << " in " << diff5.count()
<< " s";
// Update collection file row count
for (auto& file : segment_files) {
......@@ -362,8 +363,8 @@ MemTable::ApplyDeletes() {
auto time7 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff6 = time7 - time6;
diff6 = time6 - time5;
ENGINE_LOG_DEBUG << "Update collection file row count in vector of segment: " << table_file.segment_id_
<< " in " << diff6.count() << " s";
LOG_ENGINE_DEBUG_ << "Update collection file row count in vector of segment: " << table_file.segment_id_
<< " in " << diff6.count() << " s";
}
auto time7 = std::chrono::high_resolution_clock::now();
......@@ -372,7 +373,7 @@ MemTable::ApplyDeletes() {
if (!status.ok()) {
std::string err_msg = "Failed to apply deletes: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
......@@ -380,9 +381,9 @@ MemTable::ApplyDeletes() {
auto end_total = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff7 = end_total - time7;
ENGINE_LOG_DEBUG << "Update deletes to meta in collection " << collection_id_ << " in " << diff7.count() << " s";
LOG_ENGINE_DEBUG_ << "Update deletes to meta in collection " << collection_id_ << " in " << diff7.count() << " s";
std::chrono::duration<double> diff_total = end_total - start_total;
ENGINE_LOG_DEBUG << "Finished deletes in collection " << collection_id_ << " in " << diff_total.count() << " s";
LOG_ENGINE_DEBUG_ << "Finished deletes in collection " << collection_id_ << " in " << diff_total.count() << " s";
OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_check);
......
......@@ -54,7 +54,7 @@ MemTableFile::CreateCollectionFile() {
table_file_schema_ = table_file_schema;
} else {
std::string err_msg = "MemTableFile::CreateCollectionFile failed: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
}
return status;
}
......@@ -65,7 +65,7 @@ MemTableFile::Add(const VectorSourcePtr& source) {
std::string err_msg =
"MemTableFile::Add: table_file_schema dimension = " + std::to_string(table_file_schema_.dimension_) +
", collection_id = " + table_file_schema_.collection_id_;
ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << err_msg;
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld]", "insert", 0) << err_msg;
return Status(DB_ERROR, "Not able to create collection file");
}
......@@ -160,7 +160,7 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
auto status = segment_writer_ptr_->Serialize();
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to serialize segment: " << table_file_schema_.segment_id_;
LOG_ENGINE_ERROR_ << "Failed to serialize segment: " << table_file_schema_.segment_id_;
/* Can't mark it as to_delete because data is stored in this mem collection file. Any further flush
* will try to serialize the same mem collection file and it won't be able to find the directory
......@@ -168,7 +168,7 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
*
table_file_schema_.file_type_ = meta::SegmentSchema::TO_DELETE;
meta_->UpdateCollectionFile(table_file_schema_);
ENGINE_LOG_DEBUG << "Failed to serialize segment, mark file: " << table_file_schema_.file_id_
LOG_ENGINE_DEBUG_ << "Failed to serialize segment, mark file: " << table_file_schema_.file_id_
<< " to to_delete";
*/
return status;
......@@ -194,8 +194,8 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
status = meta_->UpdateCollectionFile(table_file_schema_);
ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::SegmentSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << size << " bytes, lsn = " << wal_lsn;
LOG_ENGINE_DEBUG_ << "New " << ((table_file_schema_.file_type_ == meta::SegmentSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << size << " bytes, lsn = " << wal_lsn;
// TODO(zhiru): cache
/*
......
......@@ -40,7 +40,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment:
SafeIDGenerator& id_generator = SafeIDGenerator::GetInstance();
Status status = id_generator.GetNextIDNumbers(num_vectors_added, vector_ids_to_add);
if (!status.ok()) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "Generate ids fail: " << status.message();
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld]", "insert", 0) << "Generate ids fail: " << status.message();
return status;
}
} else {
......@@ -62,7 +62,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment:
vectors.resize(size);
memcpy(vectors.data(), vectors_.float_data_.data() + current_num_vectors_added * table_file_schema.dimension_,
size);
ENGINE_LOG_DEBUG << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment";
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment";
status = segment_writer_ptr->AddVectors(table_file_schema.file_id_, vectors, vector_ids_to_add);
} else if (!vectors_.binary_data_.empty()) {
......@@ -79,7 +79,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment:
vectors.data(),
vectors_.binary_data_.data() + current_num_vectors_added * SingleVectorSize(table_file_schema.dimension_),
size);
ENGINE_LOG_DEBUG << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment";
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment";
status = segment_writer_ptr->AddVectors(table_file_schema.file_id_, vectors, vector_ids_to_add);
}
......@@ -90,7 +90,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment:
vector_ids_.insert(vector_ids_.end(), std::make_move_iterator(vector_ids_to_add.begin()),
std::make_move_iterator(vector_ids_to_add.end()));
} else {
ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "VectorSource::Add failed: " + status.ToString();
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld]", "insert", 0) << "VectorSource::Add failed: " + status.ToString();
}
return status;
......
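The LogOut helper threaded through these insert paths is also not shown here; a plausible stand-in, assuming it is printf-style formatting into a std::string:

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    // Hypothetical shape of LogOut: format into a fixed buffer, return a
    // string that can be streamed into the LOG_* macros.
    std::string
    LogOut(const char* pattern, ...) {
        char buffer[256] = {0};
        va_list args;
        va_start(args, pattern);
        vsnprintf(buffer, sizeof(buffer) - 1, pattern, args);
        va_end(args);
        return std::string(buffer);
    }

With this shape, LogOut("[%s][%ld]", "insert", 0) produces the "[insert][0]" request tag seen in the messages above.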
......@@ -50,18 +50,18 @@ MetaFactory::Build(const DBMetaOptions& metaOptions, const int& mode) {
utils::MetaUriInfo uri_info;
auto status = utils::ParseMetaUri(uri, uri_info);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Wrong URI format: URI = " << uri;
LOG_ENGINE_ERROR_ << "Wrong URI format: URI = " << uri;
throw InvalidArgumentException("Wrong URI format ");
}
if (strcasecmp(uri_info.dialect_.c_str(), "mysql") == 0) {
ENGINE_LOG_INFO << "Using MySQL";
LOG_ENGINE_INFO_ << "Using MySQL";
return std::make_shared<meta::MySQLMetaImpl>(metaOptions, mode);
} else if (strcasecmp(uri_info.dialect_.c_str(), "sqlite") == 0) {
ENGINE_LOG_INFO << "Using SQLite";
LOG_ENGINE_INFO_ << "Using SQLite";
return std::make_shared<meta::SqliteMetaImpl>(metaOptions);
} else {
ENGINE_LOG_ERROR << "Invalid dialect in URI: dialect = " << uri_info.dialect_;
LOG_ENGINE_ERROR_ << "Invalid dialect in URI: dialect = " << uri_info.dialect_;
throw InvalidArgumentException("URI dialect is not mysql / sqlite");
}
}
......
......@@ -36,8 +36,8 @@ void
MySQLConnectionPool::release(const mysqlpp::Connection* pc) {
mysqlpp::ConnectionPool::release(pc);
if (conns_in_use_ <= 0) {
ENGINE_LOG_WARNING << "MySQLConnetionPool::release: conns_in_use_ is less than zero. conns_in_use_ = "
<< conns_in_use_;
LOG_ENGINE_WARNING_ << "MySQLConnetionPool::release: conns_in_use_ is less than zero. conns_in_use_ = "
<< conns_in_use_;
} else {
--conns_in_use_;
}
......@@ -70,8 +70,8 @@ MySQLConnectionPool::create() {
user_.empty() ? 0 : user_.c_str(), password_.empty() ? 0 : password_.c_str(), port_);
return conn;
} catch (const mysqlpp::ConnectionFailed& er) {
ENGINE_LOG_ERROR << "Failed to connect to database server"
<< ": " << er.what();
LOG_ENGINE_ERROR_ << "Failed to connect to database server"
<< ": " << er.what();
return nullptr;
}
}
......
This diff is collapsed.
......@@ -46,11 +46,11 @@ namespace {
Status
HandleException(const std::string& desc, const char* what = nullptr) {
if (what == nullptr) {
ENGINE_LOG_ERROR << desc;
LOG_ENGINE_ERROR_ << desc;
return Status(DB_META_TRANSACTION_FAILED, desc);
} else {
std::string msg = desc + ":" + what;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_META_TRANSACTION_FAILED, msg);
}
}
......@@ -145,7 +145,7 @@ SqliteMetaImpl::Initialize() {
fiu_do_on("SqliteMetaImpl.Initialize.fail_create_directory", ret = false);
if (!ret) {
std::string msg = "Failed to create db directory " + options_.path_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_INVALID_PATH, msg);
}
}
......@@ -198,7 +198,7 @@ SqliteMetaImpl::CreateCollection(CollectionSchema& collection_schema) {
return HandleException("Encounter exception when create collection", e.what());
}
ENGINE_LOG_DEBUG << "Successfully create collection: " << collection_schema.collection_id_;
LOG_ENGINE_DEBUG_ << "Successfully create collection: " << collection_schema.collection_id_;
return utils::CreateCollectionPath(options_, collection_schema.collection_id_);
} catch (std::exception& e) {
......@@ -319,7 +319,7 @@ SqliteMetaImpl::DropCollection(const std::string& collection_id) {
set(c(&CollectionSchema::state_) = (int)CollectionSchema::TO_DELETE),
where(c(&CollectionSchema::collection_id_) == collection_id and c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE));
ENGINE_LOG_DEBUG << "Successfully delete collection, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully delete collection, collection id = " << collection_id;
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection", e.what());
}
......@@ -343,7 +343,7 @@ SqliteMetaImpl::DeleteCollectionFiles(const std::string& collection_id) {
where(c(&SegmentSchema::collection_id_) == collection_id and
c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE));
ENGINE_LOG_DEBUG << "Successfully delete collection files, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully delete collection files, collection id = " << collection_id;
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection files", e.what());
}
......@@ -387,7 +387,7 @@ SqliteMetaImpl::CreateCollectionFile(SegmentSchema& file_schema) {
auto id = ConnectorPtr->insert(file_schema);
file_schema.id_ = id;
ENGINE_LOG_DEBUG << "Successfully create collection file, file id = " << file_schema.file_id_;
LOG_ENGINE_DEBUG_ << "Successfully create collection file, file id = " << file_schema.file_id_;
return utils::CreateCollectionFilePath(options_, file_schema);
} catch (std::exception& e) {
return HandleException("Encounter exception when create collection file", e.what());
......@@ -439,7 +439,7 @@ SqliteMetaImpl::GetCollectionFiles(const std::string& collection_id, const std::
collection_files.emplace_back(file_schema);
}
ENGINE_LOG_DEBUG << "Get collection files by id";
LOG_ENGINE_DEBUG_ << "Get collection files by id";
return result;
} catch (std::exception& e) {
return HandleException("Encounter exception when lookup collection files", e.what());
......@@ -489,7 +489,7 @@ SqliteMetaImpl::GetCollectionFilesBySegmentId(const std::string& segment_id,
}
}
ENGINE_LOG_DEBUG << "Get collection files by segment id";
LOG_ENGINE_DEBUG_ << "Get collection files by segment id";
return Status::OK();
} catch (std::exception& e) {
return HandleException("Encounter exception when lookup collection files by segment id", e.what());
......@@ -504,7 +504,7 @@ SqliteMetaImpl::UpdateCollectionFlag(const std::string& collection_id, int64_t f
// set all backup file to raw
ConnectorPtr->update_all(set(c(&CollectionSchema::flag_) = flag), where(c(&CollectionSchema::collection_id_) == collection_id));
ENGINE_LOG_DEBUG << "Successfully update collection flag, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully update collection flag, collection id = " << collection_id;
} catch (std::exception& e) {
std::string msg = "Encounter exception when update collection flag: collection_id = " + collection_id;
return HandleException(msg, e.what());
......@@ -520,7 +520,7 @@ SqliteMetaImpl::UpdateCollectionFlushLSN(const std::string& collection_id, uint6
ConnectorPtr->update_all(set(c(&CollectionSchema::flush_lsn_) = flush_lsn),
where(c(&CollectionSchema::collection_id_) == collection_id));
ENGINE_LOG_DEBUG << "Successfully update collection flush_lsn, collection id = " << collection_id << " flush_lsn = " << flush_lsn;;
LOG_ENGINE_DEBUG_ << "Successfully update collection flush_lsn, collection id = " << collection_id << " flush_lsn = " << flush_lsn;;
} catch (std::exception& e) {
std::string msg = "Encounter exception when update collection lsn: collection_id = " + collection_id;
return HandleException(msg, e.what());
......@@ -571,7 +571,7 @@ SqliteMetaImpl::UpdateCollectionFile(SegmentSchema& file_schema) {
ConnectorPtr->update(file_schema);
ENGINE_LOG_DEBUG << "Update single collection file, file id = " << file_schema.file_id_;
LOG_ENGINE_DEBUG_ << "Update single collection file, file id = " << file_schema.file_id_;
} catch (std::exception& e) {
std::string msg =
"Exception update collection file: collection_id = " + file_schema.collection_id_ + " file_id = " + file_schema.file_id_;
......@@ -621,7 +621,7 @@ SqliteMetaImpl::UpdateCollectionFiles(SegmentsSchema& files) {
return HandleException("UpdateCollectionFiles error: sqlite transaction failed");
}
ENGINE_LOG_DEBUG << "Update " << files.size() << " collection files";
LOG_ENGINE_DEBUG_ << "Update " << files.size() << " collection files";
} catch (std::exception& e) {
return HandleException("Encounter exception when update collection files", e.what());
}
......@@ -640,7 +640,7 @@ SqliteMetaImpl::UpdateCollectionFilesRowCount(SegmentsSchema& files) {
ConnectorPtr->update_all(set(c(&SegmentSchema::row_count_) = file.row_count_,
c(&SegmentSchema::updated_time_) = utils::GetMicroSecTimeStamp()),
where(c(&SegmentSchema::file_id_) == file.file_id_));
ENGINE_LOG_DEBUG << "Update file " << file.file_id_ << " row count to " << file.row_count_;
LOG_ENGINE_DEBUG_ << "Update file " << file.file_id_ << " row count to " << file.row_count_;
}
} catch (std::exception& e) {
return HandleException("Encounter exception when update collection files row count", e.what());
......@@ -692,7 +692,7 @@ SqliteMetaImpl::UpdateCollectionIndex(const std::string& collection_id, const Co
where(c(&SegmentSchema::collection_id_) == collection_id and
c(&SegmentSchema::file_type_) == (int)SegmentSchema::BACKUP));
ENGINE_LOG_DEBUG << "Successfully update collection index, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully update collection index, collection id = " << collection_id;
} catch (std::exception& e) {
std::string msg = "Encounter exception when update collection index: collection_id = " + collection_id;
return HandleException(msg, e.what());
......@@ -715,7 +715,7 @@ SqliteMetaImpl::UpdateCollectionFilesToIndex(const std::string& collection_id) {
c(&SegmentSchema::row_count_) >= meta::BUILD_INDEX_THRESHOLD and
c(&SegmentSchema::file_type_) == (int)SegmentSchema::RAW));
ENGINE_LOG_DEBUG << "Update files to to_index, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Update files to to_index, collection id = " << collection_id;
} catch (std::exception& e) {
return HandleException("Encounter exception when update collection files to to_index", e.what());
}
......@@ -783,7 +783,7 @@ SqliteMetaImpl::DropCollectionIndex(const std::string& collection_id) {
set(c(&CollectionSchema::engine_type_) = raw_engine_type, c(&CollectionSchema::index_params_) = "{}"),
where(c(&CollectionSchema::collection_id_) == collection_id));
ENGINE_LOG_DEBUG << "Successfully drop collection index, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully drop collection index, collection id = " << collection_id;
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection index files", e.what());
}
......@@ -968,11 +968,11 @@ SqliteMetaImpl::FilesToSearch(const std::string& collection_id, SegmentsSchema&
files.emplace_back(collection_file);
}
if (files.empty()) {
ENGINE_LOG_ERROR << "No file to search for collection: " << collection_id;
LOG_ENGINE_ERROR_ << "No file to search for collection: " << collection_id;
}
if (selected.size() > 0) {
ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-search files";
LOG_ENGINE_DEBUG_ << "Collect " << selected.size() << " to-search files";
}
return ret;
} catch (std::exception& e) {
......@@ -1038,7 +1038,7 @@ SqliteMetaImpl::FilesToMerge(const std::string& collection_id, SegmentsSchema& f
}
if (to_merge_files > 0) {
ENGINE_LOG_TRACE << "Collect " << to_merge_files << " to-merge files";
LOG_ENGINE_TRACE_ << "Collect " << to_merge_files << " to-merge files";
}
return result;
} catch (std::exception& e) {
......@@ -1102,7 +1102,7 @@ SqliteMetaImpl::FilesToIndex(SegmentsSchema& files) {
}
if (selected.size() > 0) {
ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-index files";
LOG_ENGINE_DEBUG_ << "Collect " << selected.size() << " to-index files";
}
return ret;
} catch (std::exception& e) {
......@@ -1206,7 +1206,7 @@ SqliteMetaImpl::FilesByType(const std::string& collection_id, const std::vector<
default:break;
}
}
ENGINE_LOG_DEBUG << msg;
LOG_ENGINE_DEBUG_ << msg;
}
} catch (std::exception& e) {
return HandleException("Encounter exception when check non index files", e.what());
......@@ -1284,9 +1284,9 @@ SqliteMetaImpl::FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files)
}
if (files.empty()) {
ENGINE_LOG_ERROR << "No file to search in file id list";
LOG_ENGINE_ERROR_ << "No file to search in file id list";
} else {
ENGINE_LOG_DEBUG << "Collect " << selected.size() << " files by id";
LOG_ENGINE_DEBUG_ << "Collect " << selected.size() << " files by id";
}
return ret;
......@@ -1323,7 +1323,7 @@ SqliteMetaImpl::Archive() {
return HandleException("Encounter exception when update collection files", e.what());
}
ENGINE_LOG_DEBUG << "Archive old files";
LOG_ENGINE_DEBUG_ << "Archive old files";
}
if (criteria == engine::ARCHIVE_CONF_DISK) {
uint64_t sum = 0;
......@@ -1332,7 +1332,7 @@ SqliteMetaImpl::Archive() {
int64_t to_delete = (int64_t)sum - limit * G;
DiscardFiles(to_delete);
ENGINE_LOG_DEBUG << "Archive files to free disk";
LOG_ENGINE_DEBUG_ << "Archive files to free disk";
}
}
......@@ -1375,7 +1375,7 @@ SqliteMetaImpl::CleanUpShadowFiles() {
auto commited = ConnectorPtr->transaction([&]() mutable {
for (auto& file : files) {
ENGINE_LOG_DEBUG << "Remove collection file type as NEW";
LOG_ENGINE_DEBUG_ << "Remove collection file type as NEW";
ConnectorPtr->remove<SegmentSchema>(std::get<0>(file));
}
return true;
......@@ -1388,7 +1388,7 @@ SqliteMetaImpl::CleanUpShadowFiles() {
}
if (files.size() > 0) {
ENGINE_LOG_DEBUG << "Clean " << files.size() << " files";
LOG_ENGINE_DEBUG_ << "Clean " << files.size() << " files";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when clean collection file", e.what());
......@@ -1439,7 +1439,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
// check if the file can be deleted
if (OngoingFileChecker::GetInstance().IsIgnored(collection_file)) {
ENGINE_LOG_DEBUG << "File:" << collection_file.file_id_
LOG_ENGINE_DEBUG_ << "File:" << collection_file.file_id_
<< " currently is in use, not able to delete now";
continue; // ignore this file, don't delete it
}
......@@ -1457,7 +1457,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
// delete file from disk storage
utils::DeleteCollectionFilePath(options_, collection_file);
ENGINE_LOG_DEBUG << "Remove file id:" << collection_file.file_id_ << " location:"
LOG_ENGINE_DEBUG_ << "Remove file id:" << collection_file.file_id_ << " location:"
<< collection_file.location_;
collection_ids.insert(collection_file.collection_id_);
segment_ids.insert(std::make_pair(collection_file.segment_id_, collection_file));
......@@ -1474,7 +1474,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
}
if (clean_files > 0) {
ENGINE_LOG_DEBUG << "Clean " << clean_files << " files expired in " << seconds << " seconds";
LOG_ENGINE_DEBUG_ << "Clean " << clean_files << " files expired in " << seconds << " seconds";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when clean collection files", e.what());
......@@ -1506,7 +1506,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
}
if (collections.size() > 0) {
ENGINE_LOG_DEBUG << "Remove " << collections.size() << " collections from meta";
LOG_ENGINE_DEBUG_ << "Remove " << collections.size() << " collections from meta";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when clean collection files", e.what());
......@@ -1529,7 +1529,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
}
if (remove_collections) {
ENGINE_LOG_DEBUG << "Remove " << remove_collections << " collections folder";
LOG_ENGINE_DEBUG_ << "Remove " << remove_collections << " collections folder";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection folder", e.what());
......@@ -1549,13 +1549,13 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
utils::DeleteSegment(options_, segment_id.second);
std::string segment_dir;
utils::GetParentPath(segment_id.second.location_, segment_dir);
ENGINE_LOG_DEBUG << "Remove segment directory: " << segment_dir;
LOG_ENGINE_DEBUG_ << "Remove segment directory: " << segment_dir;
++remove_segments;
}
}
if (remove_segments > 0) {
ENGINE_LOG_DEBUG << "Remove " << remove_segments << " segments folder";
LOG_ENGINE_DEBUG_ << "Remove " << remove_segments << " segments folder";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection folder", e.what());
......@@ -1597,7 +1597,7 @@ SqliteMetaImpl::Count(const std::string& collection_id, uint64_t& result) {
Status
SqliteMetaImpl::DropAll() {
ENGINE_LOG_DEBUG << "Drop all sqlite meta";
LOG_ENGINE_DEBUG_ << "Drop all sqlite meta";
try {
ConnectorPtr->drop_table(META_TABLES);
......@@ -1615,7 +1615,7 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) {
return Status::OK();
}
ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size;
LOG_ENGINE_DEBUG_ << "About to discard size=" << to_discard_size;
try {
fiu_do_on("SqliteMetaImpl.DiscardFiles.throw_exception", throw std::exception());
......@@ -1640,7 +1640,7 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) {
collection_file.id_ = std::get<0>(file);
collection_file.file_size_ = std::get<1>(file);
ids.push_back(collection_file.id_);
ENGINE_LOG_DEBUG << "Discard file id=" << collection_file.file_id_
LOG_ENGINE_DEBUG_ << "Discard file id=" << collection_file.file_id_
<< " file size=" << collection_file.file_size_;
to_discard_size -= collection_file.file_size_;
}
......@@ -1685,7 +1685,7 @@ SqliteMetaImpl::SetGlobalLastLSN(uint64_t lsn) {
ConnectorPtr->update_all(set(c(&EnvironmentSchema::global_lsn_) = lsn));
}
ENGINE_LOG_DEBUG << "Update global lsn = " << lsn;
LOG_ENGINE_DEBUG_ << "Update global lsn = " << lsn;
} catch (std::exception& e) {
std::string msg = "Exception update global lsn = " + lsn;
return HandleException(msg, e.what());
......
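A pattern worth calling out across the SqliteMetaImpl hunks: nothing is deleted in place. DropCollection and DeleteCollectionFiles only flip state or file_type to TO_DELETE, and CleanUpFilesWithTTL later reclaims expired rows, segment folders and collection folders. A condensed, self-contained sketch of that two-phase delete (illustrative types, not the real sqlite_orm schema):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    enum FileType { RAW, TO_INDEX, INDEX, BACKUP, TO_DELETE };

    struct FileRow {
        int64_t id = 0;
        int file_type = RAW;
        int64_t updated_time_us = 0;
    };

    // Phase 1: cheap, transactional soft delete.
    void
    MarkFilesDeleted(std::vector<FileRow>& rows, int64_t now_us) {
        for (auto& row : rows) {
            row.file_type = TO_DELETE;
            row.updated_time_us = now_us;
        }
    }

    // Phase 2: a periodic GC pass removes rows whose TTL expired; disk cleanup
    // happens alongside, as CleanUpFilesWithTTL does above.
    void
    GcExpiredFiles(std::vector<FileRow>& rows, int64_t now_us, int64_t ttl_us) {
        auto expired = [&](const FileRow& row) {
            return row.file_type == TO_DELETE && now_us - row.updated_time_us > ttl_us;
        };
        rows.erase(std::remove_if(rows.begin(), rows.end(), expired), rows.end());
    }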
......@@ -50,7 +50,7 @@ MXLogBuffer::~MXLogBuffer() {
*/
bool
MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
WAL_LOG_DEBUG << "start_lsn " << start_lsn << " end_lsn " << end_lsn;
LOG_WAL_DEBUG_ << "start_lsn " << start_lsn << " end_lsn " << end_lsn;
ParserLsn(start_lsn, mxlog_buffer_reader_.file_no, mxlog_buffer_reader_.buf_offset);
ParserLsn(end_lsn, mxlog_buffer_writer_.file_no, mxlog_buffer_writer_.buf_offset);
......@@ -72,7 +72,7 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
file_handler.SetFileName(ToFileName(i));
auto file_size = file_handler.GetFileSize();
if (file_size == 0) {
WAL_LOG_ERROR << "bad wal file " << i;
LOG_WAL_ERROR_ << "bad wal file " << i;
return false;
}
if (file_size > buffer_size_need) {
......@@ -85,7 +85,7 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
if (buffer_size_need > mxlog_buffer_size_) {
mxlog_buffer_size_ = buffer_size_need;
WAL_LOG_INFO << "recovery will need more buffer, buffer size changed " << mxlog_buffer_size_;
LOG_WAL_INFO_ << "recovery will need more buffer, buffer size changed " << mxlog_buffer_size_;
}
}
......@@ -104,14 +104,14 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
} else {
mxlog_writer_.SetFileOpenMode("r+");
if (!mxlog_writer_.FileExists()) {
WAL_LOG_ERROR << "wal file not exist " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "wal file not exist " << mxlog_buffer_writer_.file_no;
return false;
}
auto read_offset = mxlog_buffer_reader_.buf_offset;
auto read_size = mxlog_buffer_writer_.buf_offset - mxlog_buffer_reader_.buf_offset;
if (!mxlog_writer_.Load(buf_[0].get() + read_offset, read_offset, read_size)) {
WAL_LOG_ERROR << "load wal file error " << read_offset << " " << read_size;
LOG_WAL_ERROR_ << "load wal file error " << read_offset << " " << read_size;
return false;
}
}
......@@ -135,11 +135,11 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
mxlog_writer_.SetFileName(ToFileName(mxlog_buffer_writer_.file_no));
mxlog_writer_.SetFileOpenMode("r+");
if (!mxlog_writer_.FileExists()) {
WAL_LOG_ERROR << "wal file not exist " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "wal file not exist " << mxlog_buffer_writer_.file_no;
return false;
}
if (!mxlog_writer_.Load(buf_[1].get(), 0, mxlog_buffer_writer_.buf_offset)) {
WAL_LOG_ERROR << "load wal file error " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "load wal file error " << mxlog_buffer_writer_.file_no;
return false;
}
}
......@@ -151,7 +151,7 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
void
MXLogBuffer::Reset(uint64_t lsn) {
WAL_LOG_DEBUG << "reset lsn " << lsn;
LOG_WAL_DEBUG_ << "reset lsn " << lsn;
buf_[0] = BufferPtr(new char[mxlog_buffer_size_]);
buf_[1] = BufferPtr(new char[mxlog_buffer_size_]);
......@@ -206,7 +206,7 @@ MXLogBuffer::Append(MXLogRecord& record) {
// Reborn means close old wal file and open new wal file
if (!mxlog_writer_.ReBorn(ToFileName(mxlog_buffer_writer_.file_no), "w")) {
WAL_LOG_ERROR << "ReBorn wal file error " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "ReBorn wal file error " << mxlog_buffer_writer_.file_no;
return WAL_FILE_ERROR;
}
}
......@@ -247,7 +247,7 @@ MXLogBuffer::Append(MXLogRecord& record) {
bool write_rst = mxlog_writer_.Write(current_write_buf + mxlog_buffer_writer_.buf_offset, record_size);
if (!write_rst) {
WAL_LOG_ERROR << "write wal file error";
LOG_WAL_ERROR_ << "write wal file error";
return WAL_FILE_ERROR;
}
......@@ -289,7 +289,7 @@ MXLogBuffer::Next(const uint64_t last_applied_lsn, MXLogRecord& record) {
mxlog_reader.SetFileOpenMode("r");
uint32_t file_size = mxlog_reader.Load(buf_[mxlog_buffer_reader_.buf_idx].get(), 0);
if (file_size == 0) {
WAL_LOG_ERROR << "load wal file error " << mxlog_buffer_reader_.file_no;
LOG_WAL_ERROR_ << "load wal file error " << mxlog_buffer_reader_.file_no;
return WAL_FILE_ERROR;
}
mxlog_buffer_reader_.max_offset = file_size;
......@@ -346,29 +346,29 @@ MXLogBuffer::GetReadLsn() {
bool
MXLogBuffer::ResetWriteLsn(uint64_t lsn) {
WAL_LOG_INFO << "reset write lsn " << lsn;
LOG_WAL_INFO_ << "reset write lsn " << lsn;
int32_t old_file_no = mxlog_buffer_writer_.file_no;
ParserLsn(lsn, mxlog_buffer_writer_.file_no, mxlog_buffer_writer_.buf_offset);
if (old_file_no == mxlog_buffer_writer_.file_no) {
WAL_LOG_DEBUG << "file No. is not changed";
LOG_WAL_DEBUG_ << "file No. is not changed";
return true;
}
std::unique_lock<std::mutex> lck(mutex_);
if (mxlog_buffer_writer_.file_no == mxlog_buffer_reader_.file_no) {
mxlog_buffer_writer_.buf_idx = mxlog_buffer_reader_.buf_idx;
WAL_LOG_DEBUG << "file No. is the same as reader";
LOG_WAL_DEBUG_ << "file No. is the same as reader";
return true;
}
lck.unlock();
if (!mxlog_writer_.ReBorn(ToFileName(mxlog_buffer_writer_.file_no), "r+")) {
WAL_LOG_ERROR << "reborn file error " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "reborn file error " << mxlog_buffer_writer_.file_no;
return false;
}
if (!mxlog_writer_.Load(buf_[mxlog_buffer_writer_.buf_idx].get(), 0, mxlog_buffer_writer_.buf_offset)) {
WAL_LOG_ERROR << "load file error";
LOG_WAL_ERROR_ << "load file error";
return false;
}
......@@ -387,7 +387,7 @@ MXLogBuffer::SetFileNoFrom(uint32_t file_no) {
if (!file_handler.FileExists()) {
break;
}
WAL_LOG_INFO << "Delete wal file " << file_no;
LOG_WAL_INFO_ << "Delete wal file " << file_no;
file_handler.DeleteFile();
} while (file_no > 0);
}
......@@ -402,7 +402,7 @@ MXLogBuffer::RemoveOldFiles(uint64_t flushed_lsn) {
MXLogFileHandler file_handler(mxlog_writer_.GetFilePath());
do {
file_handler.SetFileName(ToFileName(file_no_from_));
WAL_LOG_INFO << "Delete wal file " << file_no_from_;
LOG_WAL_INFO_ << "Delete wal file " << file_no_from_;
file_handler.DeleteFile();
} while (++file_no_from_ < file_no);
}
......
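A note on the LSN arithmetic that Init, ResetWriteLsn and RemoveOldFiles all lean on: ParserLsn splits one 64-bit LSN into a wal file number and an offset inside that file. The encoding itself is not shown in this diff; the sketch below assumes the common high/low 32-bit split, which is consistent with how the hunks use file_no and buf_offset:

    #include <cstdint>

    // Assumed layout: high 32 bits = wal file number, low 32 bits = byte offset.
    inline uint64_t
    MakeLsn(uint32_t file_no, uint32_t buf_offset) {
        return (static_cast<uint64_t>(file_no) << 32) | buf_offset;
    }

    inline void
    ParseLsn(uint64_t lsn, uint32_t& file_no, uint32_t& buf_offset) {
        file_no = static_cast<uint32_t>(lsn >> 32);
        buf_offset = static_cast<uint32_t>(lsn & 0xFFFFFFFFu);
    }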
......@@ -41,7 +41,7 @@ WalManager::WalManager(const MXLogConfiguration& config) {
auto status = server::CommonUtil::CreateDirectory(mxlog_config_.mxlog_path);
if (!status.ok()) {
std::string msg = "failed to create wal directory " + mxlog_config_.mxlog_path;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(WAL_PATH_ERROR, msg);
}
}
......@@ -148,8 +148,8 @@ WalManager::GetNextRecovery(MXLogRecord& record) {
}
}
WAL_LOG_INFO << "record type " << (int32_t)record.type << " record lsn " << record.lsn << " error code "
<< error_code;
LOG_WAL_INFO_ << "record type " << (int32_t)record.type << " record lsn " << record.lsn << " error code "
<< error_code;
return error_code;
}
......@@ -166,7 +166,7 @@ WalManager::GetNextRecord(MXLogRecord& record) {
record.lsn = flush_info_.lsn_;
flush_info_.Clear();
WAL_LOG_INFO << "record flush collection " << record.collection_id << " lsn " << record.lsn;
LOG_WAL_INFO_ << "record flush collection " << record.collection_id << " lsn " << record.lsn;
return true;
}
}
......@@ -195,14 +195,14 @@ WalManager::GetNextRecord(MXLogRecord& record) {
}
}
WAL_LOG_INFO << "record type " << (int32_t)record.type << " collection " << record.collection_id << " lsn "
<< record.lsn;
LOG_WAL_INFO_ << "record type " << (int32_t)record.type << " collection " << record.collection_id << " lsn "
<< record.lsn;
return error_code;
}
uint64_t
WalManager::CreateCollection(const std::string& collection_id) {
WAL_LOG_INFO << "create collection " << collection_id << " " << last_applied_lsn_;
LOG_WAL_INFO_ << "create collection " << collection_id << " " << last_applied_lsn_;
std::lock_guard<std::mutex> lck(mutex_);
uint64_t applied_lsn = last_applied_lsn_;
tables_[collection_id] = {applied_lsn, applied_lsn};
......@@ -211,7 +211,7 @@ WalManager::CreateCollection(const std::string& collection_id) {
void
WalManager::DropCollection(const std::string& collection_id) {
WAL_LOG_INFO << "drop collection " << collection_id;
LOG_WAL_INFO_ << "drop collection " << collection_id;
std::lock_guard<std::mutex> lck(mutex_);
tables_.erase(collection_id);
}
......@@ -225,7 +225,7 @@ WalManager::CollectionFlushed(const std::string& collection_id, uint64_t lsn) {
}
lck.unlock();
WAL_LOG_INFO << collection_id << " is flushed by lsn " << lsn;
LOG_WAL_INFO_ << collection_id << " is flushed by lsn " << lsn;
}
template <typename T>
......@@ -243,7 +243,7 @@ WalManager::Insert(const std::string& collection_id, const std::string& partitio
size_t vector_num = vector_ids.size();
if (vector_num == 0) {
WAL_LOG_ERROR << LogOut("[%s][%ld] The ids is empty.", "insert", 0);
LOG_WAL_ERROR_ << LogOut("[%s][%ld] The ids is empty.", "insert", 0);
return false;
}
size_t dim = vectors.size() / vector_num;
......@@ -265,8 +265,8 @@ WalManager::Insert(const std::string& collection_id, const std::string& partitio
max_rcd_num = (mxlog_config_.buffer_size - head_size) / unit_size;
}
if (max_rcd_num == 0) {
WAL_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "Wal buffer size is too small "
<< mxlog_config_.buffer_size << " unit " << unit_size;
LOG_WAL_ERROR_ << LogOut("[%s][%ld]", "insert", 0) << "Wal buffer size is too small "
<< mxlog_config_.buffer_size << " unit " << unit_size;
return false;
}
......@@ -291,8 +291,8 @@ WalManager::Insert(const std::string& collection_id, const std::string& partitio
}
lck.unlock();
WAL_LOG_INFO << LogOut("[%s][%ld]", "insert", 0) << collection_id << " insert in part " << partition_tag
<< " with lsn " << new_lsn;
LOG_WAL_INFO_ << LogOut("[%s][%ld]", "insert", 0) << collection_id << " insert in part " << partition_tag
<< " with lsn " << new_lsn;
return p_meta_handler_->SetMXLogInternalMeta(new_lsn);
}
......@@ -301,7 +301,7 @@ bool
WalManager::DeleteById(const std::string& collection_id, const IDNumbers& vector_ids) {
size_t vector_num = vector_ids.size();
if (vector_num == 0) {
WAL_LOG_ERROR << "The ids is empty.";
LOG_WAL_ERROR_ << "The ids is empty.";
return false;
}
......@@ -344,7 +344,7 @@ WalManager::DeleteById(const std::string& collection_id, const IDNumbers& vector
}
lck.unlock();
WAL_LOG_INFO << collection_id << " delete rows by id, lsn " << new_lsn;
LOG_WAL_INFO_ << collection_id << " delete rows by id, lsn " << new_lsn;
return p_meta_handler_->SetMXLogInternalMeta(new_lsn);
}
......@@ -381,7 +381,7 @@ WalManager::Flush(const std::string& collection_id) {
flush_info_.lsn_ = lsn;
}
WAL_LOG_INFO << collection_id << " want to be flush, lsn " << lsn;
LOG_WAL_INFO_ << collection_id << " want to be flush, lsn " << lsn;
return lsn;
}
......
......@@ -40,7 +40,7 @@ KnowhereResource::Initialize() {
faiss::faiss_use_avx512 = use_avx512;
std::string cpu_flag;
if (faiss::hook_init(cpu_flag)) {
ENGINE_LOG_DEBUG << "FAISS hook " << cpu_flag;
LOG_ENGINE_DEBUG_ << "FAISS hook " << cpu_flag;
} else {
return Status(KNOWHERE_UNEXPECTED_ERROR, "FAISS hook fail, CPU not supported!");
}
......
......@@ -63,13 +63,13 @@ SystemInfo::Init() {
nvmlresult = nvmlInit();
fiu_do_on("SystemInfo.Init.nvmInit_fail", nvmlresult = NVML_ERROR_NOT_FOUND);
if (NVML_SUCCESS != nvmlresult) {
SERVER_LOG_ERROR << "System information initilization failed";
LOG_SERVER_ERROR_ << "System information initilization failed";
return;
}
nvmlresult = nvmlDeviceGetCount(&num_device_);
fiu_do_on("SystemInfo.Init.nvm_getDevice_fail", nvmlresult = NVML_ERROR_NOT_FOUND);
if (NVML_SUCCESS != nvmlresult) {
SERVER_LOG_ERROR << "Unable to get devidce number";
LOG_SERVER_ERROR_ << "Unable to get devidce number";
return;
}
#endif
......@@ -158,7 +158,7 @@ SystemInfo::getTotalCpuTime(std::vector<uint64_t>& work_time_array) {
FILE* file = fopen("/proc/stat", "r");
fiu_do_on("SystemInfo.getTotalCpuTime.open_proc", file = NULL);
if (file == NULL) {
SERVER_LOG_ERROR << "Could not open stat file";
LOG_SERVER_ERROR_ << "Could not open stat file";
return total_time_array;
}
......@@ -170,7 +170,7 @@ SystemInfo::getTotalCpuTime(std::vector<uint64_t>& work_time_array) {
char* ret = fgets(buffer, sizeof(buffer) - 1, file);
fiu_do_on("SystemInfo.getTotalCpuTime.read_proc", ret = NULL);
if (ret == NULL) {
SERVER_LOG_ERROR << "Could not read stat file";
LOG_SERVER_ERROR_ << "Could not read stat file";
fclose(file);
return total_time_array;
}
......@@ -265,7 +265,7 @@ SystemInfo::CPUTemperature() {
dir = opendir(path.c_str());
fiu_do_on("SystemInfo.CPUTemperature.opendir", dir = NULL);
if (!dir) {
SERVER_LOG_ERROR << "Could not open hwmon directory";
LOG_SERVER_ERROR_ << "Could not open hwmon directory";
return result;
}
......@@ -283,7 +283,7 @@ SystemInfo::CPUTemperature() {
FILE* file = fopen(object.c_str(), "r");
fiu_do_on("SystemInfo.CPUTemperature.openfile", file = NULL);
if (file == nullptr) {
SERVER_LOG_ERROR << "Could not open temperature file";
LOG_SERVER_ERROR_ << "Could not open temperature file";
return result;
}
float temp;
......
......@@ -49,7 +49,7 @@ PrometheusMetrics::Init() {
// Pushgateway Registry
gateway_->RegisterCollectable(registry_);
} catch (std::exception& ex) {
SERVER_LOG_ERROR << "Failed to connect prometheus server: " << std::string(ex.what());
LOG_SERVER_ERROR_ << "Failed to connect prometheus server: " << std::string(ex.what());
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}
......
......@@ -293,7 +293,7 @@ class PrometheusMetrics : public MetricsBase {
PushToGateway() override {
if (startup_) {
if (gateway_->Push() != 200) {
ENGINE_LOG_WARNING << "Metrics pushgateway failed";
LOG_ENGINE_WARNING_ << "Metrics pushgateway failed";
}
}
}
......
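For readers unfamiliar with the pushgateway flow referenced in both Prometheus hunks: RegisterCollectable hooks the metric registry to the gateway, and Push() returns an HTTP status code, hence the != 200 check. A minimal sketch assuming prometheus-cpp's Gateway API (endpoint values are placeholders, not Milvus defaults):

    #include <memory>

    #include <prometheus/gateway.h>
    #include <prometheus/registry.h>

    int
    main() {
        auto registry = std::make_shared<prometheus::Registry>();
        // Host, port and job name are invented for the example.
        prometheus::Gateway gateway{"127.0.0.1", "9091", "milvus_metrics"};
        gateway.RegisterCollectable(registry);
        // Push() performs the HTTP request and returns the response status code.
        return gateway.Push() == 200 ? 0 : 1;
    }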
......@@ -68,6 +68,7 @@ JobMgr::Put(const JobPtr& job) {
void
JobMgr::worker_function() {
SetThreadName("jobmgr_thread");
while (running_) {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [this] { return !queue_.empty(); });
......
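SetThreadName is one of the genuinely new calls in this commit; each scheduler worker now labels itself. Its definition is not visible in this view, but a plausible Linux-only sketch is below. The 15-character limit of pthread_setname_np would also explain the terse names such as "taskexector_th".

    #include <pthread.h>

    #include <string>

    // Assumed implementation: tag the calling thread so it shows up by name in
    // top -H, gdb and core dumps. Linux rejects names over 15 chars plus NUL.
    void
    SetThreadName(const std::string& name) {
        pthread_setname_np(pthread_self(), name.c_str());
    }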
......@@ -19,8 +19,8 @@ namespace scheduler {
void
ResourceMgr::Start() {
if (not check_resource_valid()) {
ENGINE_LOG_ERROR << "Resources invalid, cannot start ResourceMgr.";
ENGINE_LOG_ERROR << Dump();
LOG_ENGINE_ERROR_ << "Resources invalid, cannot start ResourceMgr.";
LOG_ENGINE_ERROR_ << Dump();
return;
}
......@@ -54,7 +54,7 @@ ResourceMgr::Add(ResourcePtr&& resource) {
std::lock_guard<std::mutex> lck(resources_mutex_);
if (running_) {
ENGINE_LOG_ERROR << "ResourceMgr is running, not allow to add resource";
LOG_ENGINE_ERROR_ << "ResourceMgr is running, not allow to add resource";
return ret;
}
......@@ -97,7 +97,7 @@ void
ResourceMgr::Clear() {
std::lock_guard<std::mutex> lck(resources_mutex_);
if (running_) {
ENGINE_LOG_ERROR << "ResourceMgr is running, cannot clear.";
LOG_ENGINE_ERROR_ << "ResourceMgr is running, cannot clear.";
return;
}
disk_resources_.clear();
......@@ -237,6 +237,7 @@ ResourceMgr::post_event(const EventPtr& event) {
void
ResourceMgr::event_process() {
SetThreadName("resevt_thread");
while (running_) {
std::unique_lock<std::mutex> lock(event_mutex_);
event_cv_.wait(lock, [this] { return !queue_.empty(); });
......
......@@ -110,14 +110,14 @@ class OptimizerInst {
for (auto build_id : build_gpus) {
build_msg.append(" gpu" + std::to_string(build_id));
}
SERVER_LOG_DEBUG << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str());
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str());
std::string search_msg = "Search gpu:";
for (auto search_id : search_gpus) {
search_msg.append(" gpu" + std::to_string(search_id));
}
search_msg.append(". gpu_search_threshold:" + std::to_string(gpu_search_threshold));
SERVER_LOG_DEBUG << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str());
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str());
pass_list.push_back(std::make_shared<BuildIndexPass>());
pass_list.push_back(std::make_shared<FaissFlatPass>());
......
......@@ -79,6 +79,7 @@ Scheduler::process(const EventPtr& event) {
void
Scheduler::worker_function() {
SetThreadName("schedevt_thread");
while (running_) {
std::unique_lock<std::mutex> lock(event_mutex_);
event_cv_.wait(lock, [this] { return !event_queue_.empty(); });
......
......@@ -173,7 +173,7 @@ TaskTable::PickToLoad(uint64_t limit) {
// if task is a build index task, limit it
if (task->Type() == TaskType::BuildIndexTask && task->path().Current() == "cpu") {
if (BuildMgrInst::GetInstance()->NumOfAvailable() < 1) {
SERVER_LOG_WARNING << "BuildMgr doesnot have available place for building index";
LOG_SERVER_WARNING_ << "BuildMgr doesnot have available place for building index";
continue;
}
}
......@@ -188,7 +188,7 @@ TaskTable::PickToLoad(uint64_t limit) {
size_t count = 0;
for (uint64_t j = last_finish_ + 1; j < table_.size(); ++j) {
if (not table_[j]) {
SERVER_LOG_WARNING << "collection[" << j << "] is nullptr";
LOG_SERVER_WARNING_ << "collection[" << j << "] is nullptr";
    continue;  // skip the null slot instead of dereferencing it below
}
if (table_[j]->task->path().Current() == "cpu") {
......
......@@ -31,8 +31,8 @@ BuildIndexJob::AddToIndexFiles(const engine::meta::SegmentSchemaPtr& to_index_fi
return false;
}
SERVER_LOG_DEBUG << "BuildIndexJob " << id() << " add to_index file: " << to_index_file->id_
<< ", location: " << to_index_file->location_;
LOG_SERVER_DEBUG_ << "BuildIndexJob " << id() << " add to_index file: " << to_index_file->id_
<< ", location: " << to_index_file->location_;
to_index_files_[to_index_file->id_] = to_index_file;
return true;
......@@ -42,7 +42,7 @@ void
BuildIndexJob::WaitBuildIndexFinish() {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [this] { return to_index_files_.empty(); });
SERVER_LOG_DEBUG << "BuildIndexJob " << id() << " all done";
LOG_SERVER_DEBUG_ << "BuildIndexJob " << id() << " all done";
}
void
......@@ -50,7 +50,7 @@ BuildIndexJob::BuildIndexDone(size_t to_index_id) {
std::unique_lock<std::mutex> lock(mutex_);
to_index_files_.erase(to_index_id);
cv_.notify_all();
SERVER_LOG_DEBUG << "BuildIndexJob " << id() << " finish index file: " << to_index_id;
LOG_SERVER_DEBUG_ << "BuildIndexJob " << id() << " finish index file: " << to_index_id;
}
json
......
......@@ -28,7 +28,7 @@ SearchJob::AddIndexFile(const SegmentSchemaPtr& index_file) {
return false;
}
SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld add index file: %ld", "search", 0, id(), index_file->id_);
LOG_SERVER_DEBUG_ << LogOut("[%s][%ld] SearchJob %ld add index file: %ld", "search", 0, id(), index_file->id_);
index_files_[index_file->id_] = index_file;
return true;
......@@ -38,7 +38,7 @@ void
SearchJob::WaitResult() {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [this] { return index_files_.empty(); });
SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld all done", "search", 0, id());
LOG_SERVER_DEBUG_ << LogOut("[%s][%ld] SearchJob %ld all done", "search", 0, id());
}
void
......@@ -49,7 +49,7 @@ SearchJob::SearchDone(size_t index_id) {
cv_.notify_all();
}
SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld finish index file: %ld", "search", 0, id(), index_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%ld] SearchJob %ld finish index file: %ld", "search", 0, id(), index_id);
}
ResultIds&
......
......@@ -39,19 +39,19 @@ BuildIndexPass::Run(const TaskPtr& task) {
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << "Gpu disabled, specify cpu to build index!";
LOG_SERVER_DEBUG_ << "Gpu disabled, specify cpu to build index!";
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
fiu_do_on("BuildIndexPass.Run.empty_gpu_ids", build_gpus_.clear());
if (build_gpus_.empty()) {
SERVER_LOG_WARNING << "BuildIndexPass cannot get build index gpu!";
LOG_SERVER_WARNING_ << "BuildIndexPass cannot get build index gpu!";
return false;
}
if (specified_gpu_id_ >= build_gpus_.size()) {
specified_gpu_id_ = specified_gpu_id_ % build_gpus_.size();
}
SERVER_LOG_DEBUG << "Specify gpu" << specified_gpu_id_ << " to build index!";
LOG_SERVER_DEBUG_ << "Specify gpu" << specified_gpu_id_ << " to build index!";
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, build_gpus_[specified_gpu_id_]);
specified_gpu_id_ = (specified_gpu_id_ + 1) % build_gpus_.size();
}
......
......@@ -54,16 +54,16 @@ FaissFlatPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissFlatPass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissFlatPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: nq > gpu_search_threshold, specify gpu %d to search!",
best_device_id, "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissFlatPass: nq > gpu_search_threshold, specify gpu %d to search!",
best_device_id, "search", 0);
++count_;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}
......
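Every Faiss*Pass hunk below repeats the dispatch shape visible in FaissFlatPass above: CPU when GPUs are disabled or nq is under gpu_search_threshold, otherwise round-robin over the configured GPU list via count_ % search_gpus_.size(). A self-contained sketch of that policy (illustrative, not a class from the codebase):

    #include <cstdint>
    #include <vector>

    // Round-robin GPU selection with a CPU fallback, mirroring the pass logic.
    struct DevicePicker {
        std::vector<int64_t> gpu_ids;  // assumed non-empty when gpu_enable is true
        int64_t threshold = 0;         // gpu_search_threshold
        uint64_t count = 0;
        bool gpu_enable = true;

        // Returns -1 for CPU, otherwise the next GPU id in rotation.
        int64_t
        Pick(int64_t nq) {
            if (!gpu_enable || nq < threshold || gpu_ids.empty()) {
                return -1;
            }
            return gpu_ids[count++ % gpu_ids.size()];
        }
    };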
......@@ -55,16 +55,16 @@ FaissIVFFlatPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFFlatPass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFFlatPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFFlatPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}
......
......@@ -57,16 +57,16 @@ FaissIVFPQPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFPQPass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFPQPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFPQPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
++count_;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}
......
......@@ -54,17 +54,17 @@ FaissIVFSQ8HPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8HPass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8HPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
++count_;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}
......
......@@ -55,16 +55,16 @@ FaissIVFSQ8Pass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8Pass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8Pass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8Pass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}
......
......@@ -27,7 +27,7 @@ FallbackPass::Run(const TaskPtr& task) {
return false;
}
// NEVER be empty
SERVER_LOG_DEBUG << "FallbackPass!";
LOG_SERVER_DEBUG_ << "FallbackPass!";
auto cpu = ResMgrInst::GetInstance()->GetCpuResources()[0];
auto label = std::make_shared<SpecResLabel>(cpu);
task->label() = label;
......
......@@ -153,6 +153,7 @@ Resource::pick_task_execute() {
void
Resource::loader_function() {
SetThreadName("taskloader_th");
while (running_) {
std::unique_lock<std::mutex> lock(load_mutex_);
load_cv_.wait(lock, [&] { return load_flag_; });
......@@ -165,7 +166,7 @@ Resource::loader_function() {
}
if (task_item->task->Type() == TaskType::BuildIndexTask && name() == "cpu") {
BuildMgrInst::GetInstance()->Take();
SERVER_LOG_DEBUG << name() << " load BuildIndexTask";
LOG_SERVER_DEBUG_ << name() << " load BuildIndexTask";
}
LoadFile(task_item->task);
task_item->Loaded();
......@@ -183,6 +184,7 @@ Resource::loader_function() {
void
Resource::executor_function() {
SetThreadName("taskexector_th");
if (subscriber_) {
auto event = std::make_shared<StartUpEvent>(shared_from_this());
subscriber_(std::static_pointer_cast<Event>(event));
......
......@@ -136,7 +136,7 @@ XBuildIndexTask::Execute() {
fiu_do_on("XBuildIndexTask.Execute.create_table_success", status = Status::OK());
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create collection file: " << status.ToString();
LOG_ENGINE_ERROR_ << "Failed to create collection file: " << status.ToString();
build_index_job->BuildIndexDone(to_index_id_);
build_index_job->GetStatus() = status;
to_index_engine_ = nullptr;
......@@ -146,7 +146,7 @@ XBuildIndexTask::Execute() {
auto failed_build_index = [&](std::string log_msg, std::string err_msg) {
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateCollectionFile(table_file);
ENGINE_LOG_ERROR << log_msg;
LOG_ENGINE_ERROR_ << log_msg;
build_index_job->BuildIndexDone(to_index_id_);
build_index_job->GetStatus() = Status(DB_ERROR, err_msg);
......@@ -155,7 +155,7 @@ XBuildIndexTask::Execute() {
// step 2: build index
try {
ENGINE_LOG_DEBUG << "Begin build index for file:" + table_file.location_;
LOG_ENGINE_DEBUG_ << "Begin build index for file:" + table_file.location_;
index = to_index_engine_->BuildIndex(table_file.location_, (EngineType)table_file.engine_type_);
fiu_do_on("XBuildIndexTask.Execute.build_index_fail", index = nullptr);
if (index == nullptr) {
......@@ -215,9 +215,9 @@ XBuildIndexTask::Execute() {
fiu_do_on("XBuildIndexTask.Execute.update_table_file_fail", status = Status(SERVER_UNEXPECTED_ERROR, ""));
if (status.ok()) {
ENGINE_LOG_DEBUG << "New index file " << table_file.file_id_ << " of size " << table_file.file_size_
<< " bytes"
<< " from file " << origin_file.file_id_;
LOG_ENGINE_DEBUG_ << "New index file " << table_file.file_id_ << " of size " << table_file.file_size_
<< " bytes"
<< " from file " << origin_file.file_id_;
if (build_index_job->options().insert_cache_immediately_) {
index->Cache();
}
......@@ -225,12 +225,13 @@ XBuildIndexTask::Execute() {
// failed to update meta, mark the new file as to_delete, don't delete old file
origin_file.file_type_ = engine::meta::SegmentSchema::TO_INDEX;
status = meta_ptr->UpdateCollectionFile(origin_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << origin_file.file_id_ << " to to_index";
LOG_ENGINE_DEBUG_ << "Failed to update file to index, mark file: " << origin_file.file_id_
<< " to to_index";
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateCollectionFile(table_file);
ENGINE_LOG_DEBUG << "Failed to up date file to index, mark file: " << table_file.file_id_
<< " to to_delete";
LOG_ENGINE_DEBUG_ << "Failed to up date file to index, mark file: " << table_file.file_id_
<< " to to_delete";
}
build_index_job->BuildIndexDone(to_index_id_);
......
......@@ -57,7 +57,7 @@ static constexpr size_t PARALLEL_REDUCE_BATCH = 1000;
// if (thread_count > 0) {
// reduce_batch = max_index / thread_count + 1;
// }
// ENGINE_LOG_DEBUG << "use " << thread_count <<
// LOG_ENGINE_DEBUG_ << "use " << thread_count <<
// " thread parallelly do reduce, each thread process " << reduce_batch << " vectors";
//
// std::vector<std::shared_ptr<std::thread> > thread_array;
......@@ -159,7 +159,7 @@ XSearchTask::Load(LoadType type, uint8_t device_id) {
} catch (std::exception& ex) {
// typical error: out of disk space or permission denied
error_msg = "Failed to load index file: " + std::string(ex.what());
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Encounter execption: %s", "search", 0, error_msg.c_str());
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Encounter execption: %s", "search", 0, error_msg.c_str());
stat = Status(SERVER_UNEXPECTED_ERROR, error_msg);
}
fiu_do_on("XSearchTask.Load.out_of_memory", stat = Status(SERVER_UNEXPECTED_ERROR, "out of memory"));
......@@ -202,7 +202,7 @@ void
XSearchTask::Execute() {
milvus::server::ContextFollower tracer(context_, "XSearchTask::Execute " + std::to_string(index_id_));
// ENGINE_LOG_DEBUG << "Searching in file id:" << index_id_ << " with "
// LOG_ENGINE_DEBUG_ << "Searching in file id:" << index_id_ << " with "
// << search_contexts_.size() << " tasks";
// TimeRecorder rc("DoSearch file id:" + std::to_string(index_id_));
......@@ -266,8 +266,8 @@ XSearchTask::Execute() {
// step 3: pick up topk result
auto spec_k = file_->row_count_ < topk ? file_->row_count_ : topk;
if (spec_k == 0) {
ENGINE_LOG_WARNING << LogOut("[%s][%ld] Searching in an empty file. file location = %s", "search", 0,
file_->location_.c_str());
LOG_ENGINE_WARNING_ << LogOut("[%s][%ld] Searching in an empty file. file location = %s", "search", 0,
file_->location_.c_str());
}
{
......@@ -288,7 +288,7 @@ XSearchTask::Execute() {
span = rc.RecordSection(hdr + ", reduce topk");
// search_job->AccumReduceCost(span);
} catch (std::exception& ex) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] SearchTask encounter exception: %s", "search", 0, ex.what());
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] SearchTask encounter exception: %s", "search", 0, ex.what());
// search_job->IndexSearchDone(index_id_);//mark as done avoid dead lock, even search failed
}
......@@ -307,7 +307,7 @@ XSearchTask::MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const sch
size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids,
scheduler::ResultDistances& tar_distances) {
if (src_ids.empty()) {
ENGINE_LOG_DEBUG << LogOut("[%s][%d] Search result is empty.", "search", 0);
LOG_ENGINE_DEBUG_ << LogOut("[%s][%d] Search result is empty.", "search", 0);
return;
}
......
......@@ -53,7 +53,7 @@ IdBloomFilter::Add(doc_id_t uid) {
const std::lock_guard<std::mutex> lock(mutex_);
if (scaling_bloom_add(bloom_filter_, s.c_str(), s.size(), uid) == -1) {
// Counter overflow does not affect bloom filter's normal functionality
ENGINE_LOG_WARNING << "Warning adding id=" << s << " to bloom filter: 4 bit counter Overflow";
LOG_ENGINE_WARNING_ << "Warning adding id=" << s << " to bloom filter: 4 bit counter Overflow";
// return Status(DB_BLOOM_FILTER_ERROR, "Bloom filter error: 4 bit counter Overflow");
}
return Status::OK();
......@@ -65,7 +65,7 @@ IdBloomFilter::Remove(doc_id_t uid) {
const std::lock_guard<std::mutex> lock(mutex_);
if (scaling_bloom_remove(bloom_filter_, s.c_str(), s.size(), uid) == -1) {
// Should never go in here, but just to be safe
ENGINE_LOG_WARNING << "Warning removing id=" << s << " in bloom filter: Decrementing zero in counter";
LOG_ENGINE_WARNING_ << "Warning removing id=" << s << " in bloom filter: Decrementing zero in counter";
// return Status(DB_BLOOM_FILTER_ERROR, "Error removing in bloom filter: Decrementing zero in counter");
}
return Status::OK();
......
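Context for the overflow and underflow warnings above: the dablooms scaling bloom filter keeps a small per-slot counter (4 bits, per the message) so Remove can undo Add; saturation only costs accuracy, never correctness, which is why both cases warn instead of failing. A toy counting filter showing the same mechanics (not the dablooms API; two hashes and saturating counters are deliberate simplifications):

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <vector>

    class ToyCountingBloom {
     public:
        explicit ToyCountingBloom(size_t slots) : counters_(slots, 0) {
        }

        void
        Add(const std::string& key) {
            for (size_t slot : Slots(key)) {
                if (counters_[slot] < 15) {  // saturate the 4-bit counter
                    ++counters_[slot];
                }
            }
        }

        void
        Remove(const std::string& key) {
            for (size_t slot : Slots(key)) {
                if (counters_[slot] > 0) {  // never decrement below zero
                    --counters_[slot];
                }
            }
        }

        bool
        MayContain(const std::string& key) const {
            for (size_t slot : Slots(key)) {
                if (counters_[slot] == 0) {
                    return false;
                }
            }
            return true;  // possibly a false positive, as with any bloom filter
        }

     private:
        std::vector<size_t>
        Slots(const std::string& key) const {
            size_t a = std::hash<std::string>{}(key);
            size_t b = a * 0x9e3779b97f4a7c15ULL + 1;
            return {a % counters_.size(), b % counters_.size()};
        }

        std::vector<uint8_t> counters_;
    };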
......@@ -66,7 +66,7 @@ SegmentReader::LoadVectors(off_t offset, size_t num_bytes, std::vector<uint8_t>&
default_codec.GetVectorsFormat()->read_vectors(fs_ptr_, offset, num_bytes, raw_vectors);
} catch (std::exception& e) {
std::string err_msg = "Failed to load raw vectors: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
......@@ -80,7 +80,7 @@ SegmentReader::LoadUids(std::vector<doc_id_t>& uids) {
default_codec.GetVectorsFormat()->read_uids(fs_ptr_, uids);
} catch (std::exception& e) {
std::string err_msg = "Failed to load uids: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
......@@ -100,7 +100,7 @@ SegmentReader::LoadVectorIndex(const std::string& location, segment::VectorIndex
default_codec.GetVectorIndexFormat()->read(fs_ptr_, location, vector_index_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to load vector index: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
......@@ -114,7 +114,7 @@ SegmentReader::LoadBloomFilter(segment::IdBloomFilterPtr& id_bloom_filter_ptr) {
default_codec.GetIdBloomFilterFormat()->read(fs_ptr_, id_bloom_filter_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to load bloom filter: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
......@@ -128,7 +128,7 @@ SegmentReader::LoadDeletedDocs(segment::DeletedDocsPtr& deleted_docs_ptr) {
default_codec.GetDeletedDocsFormat()->read(fs_ptr_, deleted_docs_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to load deleted docs: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
......@@ -142,7 +142,7 @@ SegmentReader::ReadDeletedDocsSize(size_t& size) {
default_codec.GetDeletedDocsFormat()->readSize(fs_ptr_, size);
} catch (std::exception& e) {
std::string err_msg = "Failed to read deleted docs size: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
......
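SegmentReader above and SegmentWriter below repeat one shape a dozen times: call a codec operation, catch std::exception, log, and convert to Status. A hedged sketch of factoring that into a single helper (Status and DB_ERROR are simplified stand-ins for the real types):

    #include <exception>
    #include <functional>
    #include <iostream>
    #include <string>

    struct Status {  // stand-in for milvus::Status
        int code = 0;
        std::string msg;
        static Status OK() { return {}; }
        bool ok() const { return code == 0; }
    };
    constexpr int DB_ERROR = 1;

    // Run op(); turn any std::exception into a logged error Status.
    Status
    RunCodecOp(const std::string& what, const std::function<void()>& op) {
        try {
            op();
        } catch (std::exception& e) {
            std::string err_msg = "Failed to " + what + ": " + e.what();
            std::cerr << err_msg << std::endl;  // LOG_ENGINE_ERROR_ in real code
            return Status{DB_ERROR, err_msg};
        }
        return Status::OK();
    }

    // Usage mirroring SegmentReader::LoadUids:
    //   return RunCodecOp("load uids", [&] {
    //       default_codec.GetVectorsFormat()->read_uids(fs_ptr_, uids);
    //   });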
......@@ -62,7 +62,7 @@ SegmentWriter::Serialize() {
auto status = WriteBloomFilter();
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
......@@ -70,7 +70,7 @@ SegmentWriter::Serialize() {
status = WriteVectors();
if (!status.ok()) {
ENGINE_LOG_ERROR << "Write vectors fail: " << status.message();
LOG_ENGINE_ERROR_ << "Write vectors fail: " << status.message();
return status;
}
......@@ -92,7 +92,7 @@ SegmentWriter::WriteVectors() {
default_codec.GetVectorsFormat()->write(fs_ptr_, segment_ptr_->vectors_ptr_);
} catch (std::exception& e) {
std::string err_msg = "Failed to write vectors: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
......@@ -106,7 +106,7 @@ SegmentWriter::WriteVectorIndex(const std::string& location) {
default_codec.GetVectorIndexFormat()->write(fs_ptr_, location, segment_ptr_->vector_index_ptr_);
} catch (std::exception& e) {
std::string err_msg = "Failed to write vector index: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
......@@ -136,7 +136,7 @@ SegmentWriter::WriteBloomFilter() {
recorder.RecordSection("Writing bloom filter");
} catch (std::exception& e) {
std::string err_msg = "Failed to write vectors: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
......@@ -151,7 +151,7 @@ SegmentWriter::WriteDeletedDocs() {
default_codec.GetDeletedDocsFormat()->write(fs_ptr_, deleted_docs_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to write deleted docs: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
......@@ -165,7 +165,7 @@ SegmentWriter::WriteDeletedDocs(const DeletedDocsPtr& deleted_docs) {
default_codec.GetDeletedDocsFormat()->write(fs_ptr_, deleted_docs);
} catch (std::exception& e) {
std::string err_msg = "Failed to write deleted docs: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
......@@ -179,7 +179,7 @@ SegmentWriter::WriteBloomFilter(const IdBloomFilterPtr& id_bloom_filter_ptr) {
default_codec.GetIdBloomFilterFormat()->write(fs_ptr_, id_bloom_filter_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to write bloom filter: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
......@@ -203,7 +203,7 @@ SegmentWriter::Merge(const std::string& dir_to_merge, const std::string& name) {
return Status(DB_ERROR, "Cannot Merge Self");
}
ENGINE_LOG_DEBUG << "Merging from " << dir_to_merge << " to " << fs_ptr_->operation_ptr_->GetDirectory();
LOG_ENGINE_DEBUG_ << "Merging from " << dir_to_merge << " to " << fs_ptr_->operation_ptr_->GetDirectory();
TimeRecorder recorder("SegmentWriter::Merge");
......@@ -214,7 +214,7 @@ SegmentWriter::Merge(const std::string& dir_to_merge, const std::string& name) {
status = segment_reader_to_merge.Load();
if (!status.ok()) {
std::string msg = "Failed to load segment from " + dir_to_merge;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
}
}
......@@ -238,7 +238,7 @@ SegmentWriter::Merge(const std::string& dir_to_merge, const std::string& name) {
auto rows = segment_to_merge->vectors_ptr_->GetCount();
recorder.RecordSection("Adding " + std::to_string(rows) + " vectors and uids");
ENGINE_LOG_DEBUG << "Merging completed from " << dir_to_merge << " to " << fs_ptr_->operation_ptr_->GetDirectory();
LOG_ENGINE_DEBUG_ << "Merging completed from " << dir_to_merge << " to " << fs_ptr_->operation_ptr_->GetDirectory();
return Status::OK();
}
......
......@@ -72,7 +72,7 @@ Vectors::Erase(std::vector<int32_t>& offsets) {
recorder.RecordSection("Deduplicating " + std::to_string(offsets.size()) + " offsets to delete");
// Reconstruct raw vectors and uids
ENGINE_LOG_DEBUG << "Begin erasing...";
LOG_ENGINE_DEBUG_ << "Begin erasing...";
size_t new_size = uids_.size() - offsets.size();
std::vector<doc_id_t> new_uids(new_size);
......
......@@ -131,7 +131,7 @@ DBWrapper::StartService() {
if (omp_thread > 0) {
omp_set_num_threads(omp_thread);
SERVER_LOG_DEBUG << "Specify openmp thread number: " << omp_thread;
LOG_SERVER_DEBUG_ << "Specify openmp thread number: " << omp_thread;
} else {
int64_t sys_thread_cnt = 8;
if (CommonUtil::GetSystemAvailableThreads(sys_thread_cnt)) {
......
......@@ -58,7 +58,7 @@ Server::Daemonize() {
// std::string log_path(GetLogDirFullPath());
// log_path += "zdb_server.(INFO/WARNNING/ERROR/CRITICAL)";
// SERVER_LOG_INFO << "Log will be exported to: " + log_path);
// LOG_SERVER_INFO_ << "Log will be exported to: " + log_path);
pid_t pid = 0;
......@@ -187,11 +187,11 @@ Server::Start() {
InitLog(log_config_file_);
// print version information
SERVER_LOG_INFO << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME;
LOG_SERVER_INFO_ << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME;
#ifdef MILVUS_GPU_VERSION
SERVER_LOG_INFO << "GPU edition";
LOG_SERVER_INFO_ << "GPU edition";
#else
SERVER_LOG_INFO << "CPU edition";
LOG_SERVER_INFO_ << "CPU edition";
#endif
/* record config and hardware information into log */
LogConfigInFile(config_filename_);
......@@ -262,7 +262,7 @@ Server::StartService() {
Status stat;
stat = engine::KnowhereResource::Initialize();
if (!stat.ok()) {
SERVER_LOG_ERROR << "KnowhereResource initialize fail: " << stat.message();
LOG_SERVER_ERROR_ << "KnowhereResource initialize fail: " << stat.message();
goto FAIL;
}
......@@ -270,7 +270,7 @@ Server::StartService() {
stat = DBWrapper::GetInstance().StartService();
if (!stat.ok()) {
SERVER_LOG_ERROR << "DBWrapper start service fail: " << stat.message();
LOG_SERVER_ERROR_ << "DBWrapper start service fail: " << stat.message();
goto FAIL;
}
......@@ -279,7 +279,7 @@ Server::StartService() {
// stat = storage::S3ClientWrapper::GetInstance().StartService();
// if (!stat.ok()) {
// SERVER_LOG_ERROR << "S3Client start service fail: " << stat.message();
// LOG_SERVER_ERROR_ << "S3Client start service fail: " << stat.message();
// goto FAIL;
// }
......
......@@ -53,7 +53,7 @@ RequestScheduler::Stop() {
return;
}
SERVER_LOG_INFO << "Scheduler gonna stop...";
LOG_SERVER_INFO_ << "Scheduler gonna stop...";
{
std::lock_guard<std::mutex> lock(queue_mtx_);
for (auto& iter : request_groups_) {
......@@ -71,7 +71,7 @@ RequestScheduler::Stop() {
request_groups_.clear();
execute_threads_.clear();
stopped_ = true;
SERVER_LOG_INFO << "Scheduler stopped";
LOG_SERVER_INFO_ << "Scheduler stopped";
}
Status
......@@ -90,7 +90,7 @@ RequestScheduler::ExecuteRequest(const BaseRequestPtr& request_ptr) {
fiu_do_on("RequestScheduler.ExecuteRequest.push_queue_fail", status = Status(SERVER_INVALID_ARGUMENT, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << "Put request to queue failed with code: " << status.ToString();
LOG_SERVER_ERROR_ << "Put request to queue failed with code: " << status.ToString();
request_ptr->Done();
return status;
}
......@@ -109,6 +109,7 @@ RequestScheduler::ExecuteRequest(const BaseRequestPtr& request_ptr) {
void
RequestScheduler::TakeToExecute(RequestQueuePtr request_queue) {
SetThreadName("reqsched_thread");
if (request_queue == nullptr) {
return;
}
......@@ -116,7 +117,7 @@ RequestScheduler::TakeToExecute(RequestQueuePtr request_queue) {
while (true) {
BaseRequestPtr request = request_queue->TakeRequest();
if (request == nullptr) {
SERVER_LOG_ERROR << "Take null from request queue, stop thread";
LOG_SERVER_ERROR_ << "Take null from request queue, stop thread";
break; // stop the thread
}
......@@ -126,10 +127,10 @@ RequestScheduler::TakeToExecute(RequestQueuePtr request_queue) {
fiu_do_on("RequestScheduler.TakeToExecute.throw_std_exception", throw std::exception());
fiu_do_on("RequestScheduler.TakeToExecute.execute_fail", status = Status(SERVER_INVALID_ARGUMENT, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << "Request failed with code: " << status.ToString();
LOG_SERVER_ERROR_ << "Request failed with code: " << status.ToString();
}
} catch (std::exception& ex) {
SERVER_LOG_ERROR << "Request failed to execute: " << ex.what();
LOG_SERVER_ERROR_ << "Request failed to execute: " << ex.what();
}
}
}
......@@ -152,7 +153,7 @@ RequestScheduler::PutToQueue(const BaseRequestPtr& request_ptr) {
fiu_do_on("RequestScheduler.PutToQueue.push_null_thread", execute_threads_.push_back(nullptr));
execute_threads_.push_back(thread);
SERVER_LOG_INFO << "Create new thread for request group: " << group_name;
LOG_SERVER_INFO_ << "Create new thread for request group: " << group_name;
}
return Status::OK();
......
......@@ -66,7 +66,7 @@ RequestGroup(BaseRequest::RequestType type) {
auto iter = s_map_type_group.find(type);
if (iter == s_map_type_group.end()) {
SERVER_LOG_ERROR << "Unsupported request type: " << type;
LOG_SERVER_ERROR_ << "Unsupported request type: " << type;
throw Exception(SERVER_NOT_IMPLEMENT, "request group undefined");
}
return iter->second;
......@@ -125,7 +125,7 @@ void
BaseRequest::set_status(const Status& status) {
status_ = status;
if (!status_.ok()) {
SERVER_LOG_ERROR << status_.message();
LOG_SERVER_ERROR_ << status_.message();
}
}
......
......@@ -72,7 +72,7 @@ DeleteByIDRequest::OnExecute() {
collection_schema.engine_type_ == (int32_t)engine::EngineType::SPTAG_KDT) {
std::string err_msg =
"Index type " + std::to_string(collection_schema.engine_type_) + " does not support delete operation";
SERVER_LOG_ERROR << err_msg;
LOG_SERVER_ERROR_ << err_msg;
return Status(SERVER_UNSUPPORTED_ERROR, err_msg);
}
......
......@@ -52,7 +52,7 @@ DropPartitionRequest::OnExecute() {
// step 2: check partition tag
if (partition_tag == milvus::engine::DEFAULT_PARTITON_TAG) {
std::string msg = "Default partition cannot be dropped.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
......
......@@ -47,7 +47,7 @@ FlushRequest::OnExecute() {
TimeRecorderAuto rc(hdr);
Status status = Status::OK();
SERVER_LOG_DEBUG << hdr;
LOG_SERVER_DEBUG_ << hdr;
for (auto& name : collection_names_) {
// only process root collection, ignore partition collection
......
......@@ -46,7 +46,7 @@ InsertRequest::Create(const std::shared_ptr<milvus::server::Context>& context, c
Status
InsertRequest::OnExecute() {
SERVER_LOG_INFO << LogOut("[%s][%ld] ", "insert", 0) << "Execute insert request.";
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "insert", 0) << "Execute insert request.";
try {
int64_t vector_count = vectors_data_.vector_count_;
fiu_do_on("InsertRequest.OnExecute.throw_std_exception", throw std::exception());
......@@ -57,12 +57,12 @@ InsertRequest::OnExecute() {
// step 1: check arguments
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid collection name: %s", "insert", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Invalid collection name: %s", "insert", 0, status.message().c_str());
return status;
}
if (vectors_data_.float_data_.empty() && vectors_data_.binary_data_.empty()) {
std::string msg = "The vector array is empty. Make sure you have entered vector records.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid records: %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Invalid records: %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
......@@ -70,7 +70,7 @@ InsertRequest::OnExecute() {
if (!vectors_data_.id_array_.empty()) {
if (vectors_data_.id_array_.size() != vector_count) {
std::string msg = "The size of vector ID array must be equal to the size of the vector.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid id array: %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Invalid id array: %s", "insert", 0, msg.c_str());
return Status(SERVER_ILLEGAL_VECTOR_ID, msg);
}
}
......@@ -85,17 +85,17 @@ InsertRequest::OnExecute() {
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Collection %s not found", "insert", 0, collection_name_.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Collection %s not found", "insert", 0, collection_name_.c_str());
return Status(SERVER_COLLECTION_NOT_EXIST, CollectionNotExistMsg(collection_name_));
} else {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Describe collection %s fail: %s", "insert", 0,
collection_name_.c_str(), status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Describe collection %s fail: %s", "insert", 0,
collection_name_.c_str(), status.message().c_str());
return status;
}
} else {
if (!collection_schema.owner_collection_.empty()) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] owner collection of %s is empty", "insert", 0,
collection_name_.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] owner collection of %s is empty", "insert", 0,
collection_name_.c_str());
return Status(SERVER_INVALID_COLLECTION_NAME, CollectionNotExistMsg(collection_name_));
}
}
......@@ -108,7 +108,7 @@ InsertRequest::OnExecute() {
// user already provided id before, all insert action require user id
if ((collection_schema.flag_ & engine::meta::FLAG_MASK_HAS_USERID) != 0 && !user_provide_ids) {
std::string msg = "Entities IDs are user-defined. Please provide IDs for all entities of the collection.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_ILLEGAL_VECTOR_ID, msg);
}
......@@ -131,40 +131,40 @@ InsertRequest::OnExecute() {
if (!vectors_data_.float_data_.empty()) { // insert float vectors
if (engine::utils::IsBinaryMetricType(collection_schema.metric_type_)) {
std::string msg = "Collection metric type doesn't support float vectors.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
// check prepared float data
if (vectors_data_.float_data_.size() % vector_count != 0) {
std::string msg = "The vector dimension must be equal to the collection dimension.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
fiu_do_on("InsertRequest.OnExecute.invalid_dim", collection_schema.dimension_ = -1);
if (vectors_data_.float_data_.size() / vector_count != collection_schema.dimension_) {
std::string msg = "The vector dimension must be equal to the collection dimension.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_VECTOR_DIMENSION, msg);
}
} else if (!vectors_data_.binary_data_.empty()) { // insert binary vectors
if (!engine::utils::IsBinaryMetricType(collection_schema.metric_type_)) {
std::string msg = "Collection metric type doesn't support binary vectors.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
// check prepared binary data
if (vectors_data_.binary_data_.size() % vector_count != 0) {
std::string msg = "The vector dimension must be equal to the collection dimension.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
if (vectors_data_.binary_data_.size() * 8 / vector_count != collection_schema.dimension_) {
std::string msg = "The vector dimension must be equal to the collection dimension.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_VECTOR_DIMENSION, msg);
}
}
......@@ -176,7 +176,7 @@ InsertRequest::OnExecute() {
status = DBWrapper::DB()->InsertVectors(collection_name_, partition_tag_, vectors_data_);
fiu_do_on("InsertRequest.OnExecute.insert_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, status.message().c_str());
return status;
}
......@@ -185,7 +185,7 @@ InsertRequest::OnExecute() {
if (ids_size != vec_count) {
std::string msg =
"Add " + std::to_string(vec_count) + " vectors but only return " + std::to_string(ids_size) + " id";
SERVER_LOG_ERROR << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, msg.c_str());
return Status(SERVER_ILLEGAL_VECTOR_ID, msg);
}
......@@ -201,7 +201,7 @@ InsertRequest::OnExecute() {
rc.RecordSection("add vectors to engine");
rc.ElapseFromBegin("total cost");
} catch (std::exception& ex) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Encounter exception: %s", "insert", 0, ex.what());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Encounter exception: %s", "insert", 0, ex.what());
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}
......
......@@ -111,7 +111,7 @@ SearchByIDRequest::OnExecute() {
config.GetGpuResourceConfigSearchResources(search_resources);
if (!search_resources.empty()) {
std::string err_msg = "SearchByID cannot be executed on GPU";
SERVER_LOG_ERROR << err_msg;
LOG_SERVER_ERROR_ << err_msg;
return Status(SERVER_UNSUPPORTED_ERROR, err_msg);
}
}
......@@ -125,7 +125,7 @@ SearchByIDRequest::OnExecute() {
collection_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_IVFSQ8) {
std::string err_msg = "Index type " + std::to_string(collection_schema.engine_type_) +
" does not support SearchByID operation";
SERVER_LOG_ERROR << err_msg;
LOG_SERVER_ERROR_ << err_msg;
return Status(SERVER_UNSUPPORTED_ERROR, err_msg);
}
......
......@@ -233,8 +233,8 @@ Status
SearchCombineRequest::OnExecute() {
try {
size_t combined_request = request_list_.size();
SERVER_LOG_DEBUG << "SearchCombineRequest execute, request count=" << combined_request
<< ", extra_params=" << extra_params_.dump();
LOG_SERVER_DEBUG_ << "SearchCombineRequest execute, request count=" << combined_request
<< ", extra_params=" << extra_params_.dump();
std::string hdr = "SearchCombineRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
......@@ -309,12 +309,12 @@ SearchCombineRequest::OnExecute() {
// all requests are skipped
if (request_list_.empty()) {
SERVER_LOG_DEBUG << "all combined requests were skipped";
LOG_SERVER_DEBUG_ << "all combined requests were skipped";
return Status::OK();
}
SERVER_LOG_DEBUG << (combined_request - run_request) << " requests were skipped";
SERVER_LOG_DEBUG << "reset topk to " << search_topk_;
LOG_SERVER_DEBUG_ << (combined_request - run_request) << " requests were skipped";
LOG_SERVER_DEBUG_ << "reset topk to " << search_topk_;
rc.RecordSection("check validation");
// step 3: construct vectors_data
......@@ -348,7 +348,7 @@ SearchCombineRequest::OnExecute() {
}
}
SERVER_LOG_DEBUG << total_count << " query vectors combined";
LOG_SERVER_DEBUG_ << total_count << " query vectors combined";
rc.RecordSection("combined query vectors");
// step 4: search vectors
......
......@@ -54,7 +54,7 @@ SearchRequest::Create(const std::shared_ptr<milvus::server::Context>& context, c
Status
SearchRequest::OnPreExecute() {
SERVER_LOG_INFO << LogOut("[%s][%ld] ", "search", 0) << "Search pre-execute. Check search parameters";
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "search", 0) << "Search pre-execute. Check search parameters";
std::string hdr = "SearchRequest pre-execute(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(LogOut("[%s][%ld] %s", "search", 0, hdr.c_str()));
......@@ -62,14 +62,14 @@ SearchRequest::OnPreExecute() {
// step 1: check collection name
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
return status;
}
// step 2: check search topk
status = ValidationUtil::ValidateSearchTopk(topk_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
return status;
}
......@@ -77,7 +77,7 @@ SearchRequest::OnPreExecute() {
status = ValidationUtil::ValidatePartitionTags(partition_list_);
fiu_do_on("SearchRequest.OnExecute.invalid_partition_tags", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
return status;
}
......@@ -86,7 +86,7 @@ SearchRequest::OnPreExecute() {
Status
SearchRequest::OnExecute() {
SERVER_LOG_INFO << LogOut("[%s][%ld] ", "search", 0) << "Search execute.";
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "search", 0) << "Search execute.";
try {
uint64_t vector_count = vectors_data_.vector_count_;
fiu_do_on("SearchRequest.OnExecute.throw_std_exception", throw std::exception());
......@@ -103,17 +103,18 @@ SearchRequest::OnExecute() {
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Collection %s not found: %s", "search", 0,
collection_name_.c_str(), status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Collection %s not found: %s", "search", 0,
collection_name_.c_str(), status.message().c_str());
return Status(SERVER_COLLECTION_NOT_EXIST, CollectionNotExistMsg(collection_name_));
} else {
SERVER_LOG_ERROR << LogOut("[%s][%d] Error occurred when describing collection %s: %s", "search", 0,
collection_name_.c_str(), status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Error occurred when describing collection %s: %s", "search", 0,
collection_name_.c_str(), status.message().c_str());
return status;
}
} else {
if (!collection_schema_.owner_collection_.empty()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, CollectionNotExistMsg(collection_name_).c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] %s", "search", 0,
CollectionNotExistMsg(collection_name_).c_str());
return Status(SERVER_INVALID_COLLECTION_NAME, CollectionNotExistMsg(collection_name_));
}
}
......@@ -121,14 +122,14 @@ SearchRequest::OnExecute() {
// step 5: check search parameters
status = ValidationUtil::ValidateSearchParams(extra_params_, collection_schema_, topk_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Invalid search params: %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Invalid search params: %s", "search", 0, status.message().c_str());
return status;
}
// step 6: check vector data according to metric type
status = ValidationUtil::ValidateVectorData(vectors_data_, collection_schema_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Invalid vector data: %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Invalid vector data: %s", "search", 0, status.message().c_str());
return status;
}
......@@ -158,7 +159,7 @@ SearchRequest::OnExecute() {
#endif
fiu_do_on("SearchRequest.OnExecute.query_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Query fail: %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Query fail: %s", "search", 0, status.message().c_str());
return status;
}
fiu_do_on("SearchRequest.OnExecute.empty_result_ids", result_ids.clear());
......@@ -173,7 +174,7 @@ SearchRequest::OnExecute() {
result_.distance_list_.swap(result_distances);
rc.RecordSection("construct result");
} catch (std::exception& ex) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Encounter exception: %s", "search", 0, ex.what());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Encounter exception: %s", "search", 0, ex.what());
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}
......
......@@ -30,7 +30,7 @@ Status
SearchReqStrategy::ReScheduleQueue(const BaseRequestPtr& request, std::queue<BaseRequestPtr>& queue) {
if (request->GetRequestType() != BaseRequest::kSearch) {
std::string msg = "search strategy can only handle search request";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_UNSUPPORTED_ERROR, msg);
}
......@@ -49,7 +49,7 @@ SearchReqStrategy::ReScheduleQueue(const BaseRequestPtr& request, std::queue<Bas
combine_request->Combine(last_search_req);
combine_request->Combine(new_search_req);
queue.push(combine_request);
SERVER_LOG_DEBUG << "Combine 2 search request";
LOG_SERVER_DEBUG_ << "Combine 2 search request";
} else {
// directly put to queue
queue.push(request);
......@@ -59,14 +59,14 @@ SearchReqStrategy::ReScheduleQueue(const BaseRequestPtr& request, std::queue<Bas
if (combine_req->CanCombine(new_search_req)) {
// combine request
combine_req->Combine(new_search_req);
SERVER_LOG_DEBUG << "Combine more search request";
LOG_SERVER_DEBUG_ << "Combine more search request";
} else {
// directly put to queue
queue.push(request);
}
} else {
std::string msg = "unsupported request type for search strategy";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_UNSUPPORTED_ERROR, msg);
}
......
......@@ -174,7 +174,7 @@ void
set_request_id(::grpc::ServerContext* context, const std::string& request_id) {
if (not context) {
// error
SERVER_LOG_ERROR << "set_request_id: grpc::ServerContext is nullptr" << std::endl;
LOG_SERVER_ERROR_ << "set_request_id: grpc::ServerContext is nullptr" << std::endl;
return;
}
......@@ -185,7 +185,7 @@ std::string
get_request_id(::grpc::ServerContext* context) {
if (not context) {
// error
SERVER_LOG_ERROR << "get_request_id: grpc::ServerContext is nullptr" << std::endl;
LOG_SERVER_ERROR_ << "get_request_id: grpc::ServerContext is nullptr" << std::endl;
return "INVALID_ID";
}
......@@ -194,7 +194,7 @@ get_request_id(::grpc::ServerContext* context) {
auto request_id_kv = server_metadata.find(REQ_ID);
if (request_id_kv == server_metadata.end()) {
// error
SERVER_LOG_ERROR << std::string(REQ_ID) << " not found in grpc.server_metadata" << std::endl;
LOG_SERVER_ERROR_ << std::string(REQ_ID) << " not found in grpc.server_metadata" << std::endl;
return "INVALID_ID";
}
......@@ -242,7 +242,7 @@ GrpcRequestHandler::OnPostRecvInitialMetaData(
auto request_id_kv = client_metadata.find("request_id");
if (request_id_kv != client_metadata.end()) {
request_id = request_id_kv->second.data();
SERVER_LOG_DEBUG << "client provide request_id: " << request_id;
LOG_SERVER_DEBUG_ << "client provide request_id: " << request_id;
// if request_id is being used by another request,
// convert it to request_id_n.
......@@ -265,7 +265,7 @@ GrpcRequestHandler::OnPostRecvInitialMetaData(
} else {
request_id = std::to_string(get_sequential_id());
set_request_id(server_context, request_id);
SERVER_LOG_DEBUG << "milvus generate request_id: " << request_id;
LOG_SERVER_DEBUG_ << "milvus generate request_id: " << request_id;
}
auto trace_context = std::make_shared<tracing::TraceContext>(span);
......@@ -282,7 +282,7 @@ GrpcRequestHandler::OnPreSendMessage(::grpc::experimental::ServerRpcInfo* server
if (context_map_.find(request_id) == context_map_.end()) {
// error
SERVER_LOG_ERROR << "request_id " << request_id << " not found in context_map_";
LOG_SERVER_ERROR_ << "request_id " << request_id << " not found in context_map_";
return;
}
context_map_[request_id]->GetTraceContext()->GetSpan()->Finish();
......@@ -294,7 +294,7 @@ GrpcRequestHandler::GetContext(::grpc::ServerContext* server_context) {
std::lock_guard<std::mutex> lock(context_map_mutex_);
auto request_id = get_request_id(server_context);
if (context_map_.find(request_id) == context_map_.end()) {
SERVER_LOG_ERROR << "GetContext: request_id " << request_id << " not found in context_map_";
LOG_SERVER_ERROR_ << "GetContext: request_id " << request_id << " not found in context_map_";
return nullptr;
}
return context_map_[request_id];
......@@ -382,7 +382,7 @@ GrpcRequestHandler::Insert(::grpc::ServerContext* context, const ::milvus::grpc:
::milvus::grpc::VectorIds* response) {
CHECK_NULLPTR_RETURN(request);
SERVER_LOG_INFO << LogOut("[%s][%d] Start insert.", "insert", 0);
LOG_SERVER_INFO_ << LogOut("[%s][%d] Start insert.", "insert", 0);
// step 1: copy vector data
engine::VectorsData vectors;
......@@ -397,7 +397,7 @@ GrpcRequestHandler::Insert(::grpc::ServerContext* context, const ::milvus::grpc:
memcpy(response->mutable_vector_id_array()->mutable_data(), vectors.id_array_.data(),
vectors.id_array_.size() * sizeof(int64_t));
SERVER_LOG_INFO << LogOut("[%s][%d] Insert done.", "insert", 0);
LOG_SERVER_INFO_ << LogOut("[%s][%d] Insert done.", "insert", 0);
SET_RESPONSE(response->mutable_status(), status, context);
return ::grpc::Status::OK;
}
......@@ -450,7 +450,7 @@ GrpcRequestHandler::Search(::grpc::ServerContext* context, const ::milvus::grpc:
::milvus::grpc::TopKQueryResult* response) {
CHECK_NULLPTR_RETURN(request);
SERVER_LOG_INFO << LogOut("[%s][%d] Search start in gRPC server", "search", 0);
LOG_SERVER_INFO_ << LogOut("[%s][%d] Search start in gRPC server", "search", 0);
// step 1: copy vector data
engine::VectorsData vectors;
CopyRowRecords(request->query_record_array(), google::protobuf::RepeatedField<google::protobuf::int64>(), vectors);
......@@ -481,7 +481,7 @@ GrpcRequestHandler::Search(::grpc::ServerContext* context, const ::milvus::grpc:
// step 5: construct and return result
ConstructResults(result, response);
SERVER_LOG_INFO << LogOut("[%s][%d] Search done.", "search", 0);
LOG_SERVER_INFO_ << LogOut("[%s][%d] Search done.", "search", 0);
SET_RESPONSE(response->mutable_status(), status, context);
......
......@@ -73,6 +73,7 @@ GrpcServer::Stop() {
Status
GrpcServer::StartService() {
SetThreadName("grpcserv_thread");
Config& config = Config::GetInstance();
std::string address, port;
......
......@@ -39,6 +39,7 @@ WebServer::Stop() {
Status
WebServer::StartService() {
SetThreadName("webserv_thread");
oatpp::base::Environment::init();
Config& config = Config::GetInstance();
......
......@@ -33,7 +33,7 @@ DiskOperation::CreateDirectory() {
auto ret = boost::filesystem::create_directory(dir_path_);
if (!ret) {
std::string err_msg = "Failed to create directory: " + dir_path_;
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FOLDER, err_msg);
}
}
......
......@@ -38,7 +38,7 @@ S3ClientWrapper::StartService() {
CONFIG_CHECK(config.GetStorageConfigS3Enable(s3_enable));
fiu_do_on("S3ClientWrapper.StartService.s3_disable", s3_enable = false);
if (!s3_enable) {
STORAGE_LOG_INFO << "S3 not enabled!";
LOG_STORAGE_INFO_ << "S3 not enabled!";
return Status::OK();
}
......@@ -89,12 +89,12 @@ S3ClientWrapper::CreateBucket() {
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
if (err.GetErrorType() != Aws::S3::S3Errors::BUCKET_ALREADY_OWNED_BY_YOU) {
STORAGE_LOG_ERROR << "ERROR: CreateBucket: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: CreateBucket: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
}
STORAGE_LOG_DEBUG << "CreateBucket '" << s3_bucket_ << "' successfully!";
LOG_STORAGE_DEBUG_ << "CreateBucket '" << s3_bucket_ << "' successfully!";
return Status::OK();
}
......@@ -108,11 +108,11 @@ S3ClientWrapper::DeleteBucket() {
fiu_do_on("S3ClientWrapper.DeleteBucket.outcome.fail", outcome = Aws::S3::Model::DeleteBucketOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: DeleteBucket: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: DeleteBucket: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
STORAGE_LOG_DEBUG << "DeleteBucket '" << s3_bucket_ << "' successfully!";
LOG_STORAGE_DEBUG_ << "DeleteBucket '" << s3_bucket_ << "' successfully!";
return Status::OK();
}
......@@ -121,7 +121,7 @@ S3ClientWrapper::PutObjectFile(const std::string& object_name, const std::string
struct stat buffer;
if (stat(file_path.c_str(), &buffer) != 0) {
std::string str = "File '" + file_path + "' not exist!";
STORAGE_LOG_ERROR << "ERROR: " << str;
LOG_STORAGE_ERROR_ << "ERROR: " << str;
return Status(SERVER_UNEXPECTED_ERROR, str);
}
......@@ -137,11 +137,11 @@ S3ClientWrapper::PutObjectFile(const std::string& object_name, const std::string
fiu_do_on("S3ClientWrapper.PutObjectFile.outcome.fail", outcome = Aws::S3::Model::PutObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: PutObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: PutObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
STORAGE_LOG_DEBUG << "PutObjectFile '" << file_path << "' successfully!";
LOG_STORAGE_DEBUG_ << "PutObjectFile '" << file_path << "' successfully!";
return Status::OK();
}
......@@ -159,11 +159,11 @@ S3ClientWrapper::PutObjectStr(const std::string& object_name, const std::string&
fiu_do_on("S3ClientWrapper.PutObjectStr.outcome.fail", outcome = Aws::S3::Model::PutObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: PutObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: PutObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
STORAGE_LOG_DEBUG << "PutObjectStr successfully!";
LOG_STORAGE_DEBUG_ << "PutObjectStr successfully!";
return Status::OK();
}
......@@ -177,7 +177,7 @@ S3ClientWrapper::GetObjectFile(const std::string& object_name, const std::string
fiu_do_on("S3ClientWrapper.GetObjectFile.outcome.fail", outcome = Aws::S3::Model::GetObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: GetObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: GetObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
......@@ -186,7 +186,7 @@ S3ClientWrapper::GetObjectFile(const std::string& object_name, const std::string
output_file << retrieved_file.rdbuf();
output_file.close();
STORAGE_LOG_DEBUG << "GetObjectFile '" << file_path << "' successfully!";
LOG_STORAGE_DEBUG_ << "GetObjectFile '" << file_path << "' successfully!";
return Status::OK();
}
......@@ -200,7 +200,7 @@ S3ClientWrapper::GetObjectStr(const std::string& object_name, std::string& conte
fiu_do_on("S3ClientWrapper.GetObjectStr.outcome.fail", outcome = Aws::S3::Model::GetObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: GetObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: GetObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
......@@ -209,7 +209,7 @@ S3ClientWrapper::GetObjectStr(const std::string& object_name, std::string& conte
ss << retrieved_file.rdbuf();
content = std::move(ss.str());
STORAGE_LOG_DEBUG << "GetObjectStr successfully!";
LOG_STORAGE_DEBUG_ << "GetObjectStr successfully!";
return Status::OK();
}
......@@ -227,7 +227,7 @@ S3ClientWrapper::ListObjects(std::vector<std::string>& object_list, const std::s
fiu_do_on("S3ClientWrapper.ListObjects.outcome.fail", outcome = Aws::S3::Model::ListObjectsOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: ListObjects: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: ListObjects: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
......@@ -238,9 +238,9 @@ S3ClientWrapper::ListObjects(std::vector<std::string>& object_list, const std::s
}
if (marker.empty()) {
STORAGE_LOG_DEBUG << "ListObjects '" << s3_bucket_ << "' successfully!";
LOG_STORAGE_DEBUG_ << "ListObjects '" << s3_bucket_ << "' successfully!";
} else {
STORAGE_LOG_DEBUG << "ListObjects '" << s3_bucket_ << ":" << marker << "' successfully!";
LOG_STORAGE_DEBUG_ << "ListObjects '" << s3_bucket_ << ":" << marker << "' successfully!";
}
return Status::OK();
}
......@@ -255,11 +255,11 @@ S3ClientWrapper::DeleteObject(const std::string& object_name) {
fiu_do_on("S3ClientWrapper.DeleteObject.outcome.fail", outcome = Aws::S3::Model::DeleteObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: DeleteObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: DeleteObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
STORAGE_LOG_DEBUG << "DeleteObject '" << object_name << "' successfully!";
LOG_STORAGE_DEBUG_ << "DeleteObject '" << object_name << "' successfully!";
return Status::OK();
}
......
......@@ -247,7 +247,7 @@ CommonUtil::GetCurrentTimeStr() {
void
CommonUtil::EraseFromCache(const std::string& item_key) {
if (item_key.empty()) {
SERVER_LOG_ERROR << "Empty key cannot be erased from cache";
LOG_SERVER_ERROR_ << "Empty key cannot be erased from cache";
return;
}
......
......@@ -31,4 +31,22 @@ LogOut(const char* pattern, ...) {
return std::string(str_p.get());
}
void
SetThreadName(const std::string& name) {
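// Note: on Linux the thread name is limited to 16 bytes (including the
// terminating NUL); pthread_setname_np fails with ERANGE for longer names.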
pthread_setname_np(pthread_self(), name.c_str());
}
std::string
GetThreadName() {
std::string thread_name = "unamed";
char name[16];
size_t len = 16;
auto err = pthread_getname_np(pthread_self(), name, len);
if (not err) {
thread_name = name;
}
return thread_name;
}
} // namespace milvus
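For context, a minimal sketch of these helpers in use; the worker function, the thread name, and the include path are illustrative assumptions, not part of this patch:

#include <iostream>
#include <thread>

#include "utils/Log.h"  // assumed header declaring SetThreadName/GetThreadName

void
Worker() {
    // Linux accepts at most 15 characters plus the terminating NUL.
    milvus::SetThreadName("demo_worker");
    std::cout << milvus::GetThreadName() << std::endl;  // prints "demo_worker"
}

int
main() {
    std::thread t(Worker);
    t.join();
    return 0;
}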
......@@ -17,57 +17,172 @@
namespace milvus {
/*
 * Please use the LOG_<MODULE>_<LEVEL>_C macros inside class member functions,
 * and the LOG_<MODULE>_<LEVEL>_ macros in all other functions.
 */
/////////////////////////////////////////////////////////////////////////////////////////////////
#define SERVER_MODULE_NAME "SERVER"
#define SERVER_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", SERVER_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define SERVER_MODULE_FUNCTION LogOut("[%s][%s][%s] ", SERVER_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define LOG_SERVER_TRACE_C LOG(TRACE) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_DEBUG_C LOG(DEBUG) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_INFO_C LOG(INFO) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_WARNING_C LOG(WARNING) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_ERROR_C LOG(ERROR) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_FATAL_C LOG(FATAL) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_TRACE_ LOG(TRACE) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_DEBUG_ LOG(DEBUG) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_INFO_ LOG(INFO) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_WARNING_ LOG(WARNING) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_ERROR_ LOG(ERROR) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_FATAL_ LOG(FATAL) << SERVER_MODULE_FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////////////
#define ENGINE_MODULE_NAME "ENGINE"
#define ENGINE_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", ENGINE_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define ENGINE_MODULE_FUNCTION LogOut("[%s][%s][%s] ", ENGINE_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define LOG_ENGINE_TRACE_C LOG(TRACE) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_DEBUG_C LOG(DEBUG) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_INFO_C LOG(INFO) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_WARNING_C LOG(WARNING) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_ERROR_C LOG(ERROR) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_FATAL_C LOG(FATAL) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_TRACE_ LOG(TRACE) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_DEBUG_ LOG(DEBUG) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_INFO_ LOG(INFO) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_WARNING_ LOG(WARNING) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_ERROR_ LOG(ERROR) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_FATAL_ LOG(FATAL) << ENGINE_MODULE_FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////////////
#define WRAPPER_MODULE_NAME "WRAPPER"
#define WRAPPER_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", WRAPPER_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define WRAPPER_MODULE_FUNCTION LogOut("[%s][%s][%s] ", WRAPPER_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define LOG_WRAPPER_TRACE_C LOG(TRACE) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_DEBUG_C LOG(DEBUG) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_INFO_C LOG(INFO) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_WARNING_C LOG(WARNING) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_ERROR_C LOG(ERROR) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_FATAL_C LOG(FATAL) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_TRACE_ LOG(TRACE) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_DEBUG_ LOG(DEBUG) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_INFO_ LOG(INFO) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_WARNING_ LOG(WARNING) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_ERROR_ LOG(ERROR) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_FATAL_ LOG(FATAL) << WRAPPER_MODULE_FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////////////
#define SERVER_DOMAIN_NAME "[SERVER] "
#define STORAGE_MODULE_NAME "STORAGE"
#define STORAGE_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", STORAGE_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define STORAGE_MODULE_FUNCTION LogOut("[%s][%s][%s] ", STORAGE_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define LOG_STORAGE_TRACE_C LOG(TRACE) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_DEBUG_C LOG(DEBUG) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_INFO_C LOG(INFO) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_WARNING_C LOG(WARNING) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_ERROR_C LOG(ERROR) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_FATAL_C LOG(FATAL) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_TRACE_ LOG(TRACE) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_DEBUG_ LOG(DEBUG) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_INFO_ LOG(INFO) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_WARNING_ LOG(WARNING) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_ERROR_ LOG(ERROR) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_FATAL_ LOG(FATAL) << STORAGE_MODULE_FUNCTION
#define SERVER_LOG_TRACE LOG(TRACE) << SERVER_DOMAIN_NAME
#define SERVER_LOG_DEBUG LOG(DEBUG) << SERVER_DOMAIN_NAME
#define SERVER_LOG_INFO LOG(INFO) << SERVER_DOMAIN_NAME
#define SERVER_LOG_WARNING LOG(WARNING) << SERVER_DOMAIN_NAME
#define SERVER_LOG_ERROR LOG(ERROR) << SERVER_DOMAIN_NAME
#define SERVER_LOG_FATAL LOG(FATAL) << SERVER_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////
#define WAL_MODULE_NAME "WAL"
#define WAL_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", WAL_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define WAL_MODULE_FUNCTION LogOut("[%s][%s][%s] ", WAL_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define LOG_WAL_TRACE_C LOG(TRACE) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_DEBUG_C LOG(DEBUG) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_INFO_C LOG(INFO) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_WARNING_C LOG(WARNING) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_ERROR_C LOG(ERROR) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_FATAL_C LOG(FATAL) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_TRACE_ LOG(TRACE) << WAL_MODULE_FUNCTION
#define LOG_WAL_DEBUG_ LOG(DEBUG) << WAL_MODULE_FUNCTION
#define LOG_WAL_INFO_ LOG(INFO) << WAL_MODULE_FUNCTION
#define LOG_WAL_WARNING_ LOG(WARNING) << WAL_MODULE_FUNCTION
#define LOG_WAL_ERROR_ LOG(ERROR) << WAL_MODULE_FUNCTION
#define LOG_WAL_FATAL_ LOG(FATAL) << WAL_MODULE_FUNCTION
/*
* Deprecated
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
// #define SERVER_DOMAIN_NAME "[SERVER]"
// #define LOG_SERVER_TRACE_ LOG(TRACE) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_DEBUG_ LOG(DEBUG) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_INFO_ LOG(INFO) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_WARNING_ LOG(WARNING) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_ERROR_ LOG(ERROR) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_FATAL_ LOG(FATAL) << SERVER_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////
#define ENGINE_DOMAIN_NAME "[ENGINE] "
// #define ENGINE_DOMAIN_NAME "[ENGINE]"
#define ENGINE_LOG_TRACE LOG(TRACE) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_DEBUG LOG(DEBUG) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_INFO LOG(INFO) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_WARNING LOG(WARNING) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_ERROR LOG(ERROR) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_FATAL LOG(FATAL) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_TRACE_ LOG(TRACE) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_DEBUG_ LOG(DEBUG) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_INFO_ LOG(INFO) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_WARNING_ LOG(WARNING) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_ERROR_ LOG(ERROR) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_FATAL_ LOG(FATAL) << ENGINE_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////
#define WRAPPER_DOMAIN_NAME "[WRAPPER] "
// #define WRAPPER_DOMAIN_NAME "[WRAPPER]"
#define WRAPPER_LOG_TRACE LOG(TRACE) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_DEBUG LOG(DEBUG) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_INFO LOG(INFO) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_WARNING LOG(WARNING) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_ERROR LOG(ERROR) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_FATAL LOG(FATAL) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_TRACE LOG(TRACE) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_DEBUG LOG(DEBUG) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_INFO LOG(INFO) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_WARNING LOG(WARNING) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_ERROR LOG(ERROR) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_FATAL LOG(FATAL) << WRAPPER_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////
#define STORAGE_DOMAIN_NAME "[STORAGE] "
// #define STORAGE_DOMAIN_NAME "[STORAGE]"
#define STORAGE_LOG_TRACE LOG(TRACE) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_DEBUG LOG(DEBUG) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_INFO LOG(INFO) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_WARNING LOG(WARNING) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_ERROR LOG(ERROR) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_FATAL LOG(FATAL) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_TRACE_ LOG(TRACE) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_DEBUG_ LOG(DEBUG) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_INFO_ LOG(INFO) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_WARNING_ LOG(WARNING) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_ERROR_ LOG(ERROR) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_FATAL_ LOG(FATAL) << STORAGE_DOMAIN_NAME
#define WAL_DOMAIN_NAME "[WAL] "
// #define WAL_DOMAIN_NAME "[WAL]"
#define WAL_LOG_TRACE LOG(TRACE) << WAL_DOMAIN_NAME
#define WAL_LOG_DEBUG LOG(DEBUG) << WAL_DOMAIN_NAME
#define WAL_LOG_INFO LOG(INFO) << WAL_DOMAIN_NAME
#define WAL_LOG_WARNING LOG(WARNING) << WAL_DOMAIN_NAME
#define WAL_LOG_ERROR LOG(ERROR) << WAL_DOMAIN_NAME
#define WAL_LOG_FATAL LOG(FATAL) << WAL_DOMAIN_NAME
// #define LOG_WAL_TRACE_ LOG(TRACE) << WAL_DOMAIN_NAME
// #define LOG_WAL_DEBUG_ LOG(DEBUG) << WAL_DOMAIN_NAME
// #define LOG_WAL_INFO_ LOG(INFO) << WAL_DOMAIN_NAME
// #define LOG_WAL_WARNING_ LOG(WARNING) << WAL_DOMAIN_NAME
// #define LOG_WAL_ERROR_ LOG(ERROR) << WAL_DOMAIN_NAME
// #define LOG_WAL_FATAL_ LOG(FATAL) << WAL_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////////
std::string
LogOut(const char* pattern, ...);
void
SetThreadName(const std::string& name);
std::string
GetThreadName();
} // namespace milvus
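For reference, a minimal usage sketch of the new macros defined above; the functions, class, messages, and include path below are illustrative assumptions, not part of this patch:

#include "utils/Log.h"  // assumed header path for these macros

void
LoadSegment() {
    // Free function: use the trailing-underscore form.
    LOG_ENGINE_DEBUG_ << "loading segment";
    // -> "[ENGINE][LoadSegment][<thread name>] loading segment"
}

class SegmentLoader {
 public:
    void
    Load() {
        // Member function: the _C form also prints typeid(*this).name(),
        // which is implementation-defined (typically mangled under GCC).
        LOG_ENGINE_DEBUG_C << "loading";
    }
};

Any of the SERVER/ENGINE/WRAPPER/STORAGE/WAL modules follow the same pattern; the thread name in the prefix comes from GetThreadName(), so threads that call SetThreadName() produce more readable logs.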
......@@ -97,7 +97,7 @@ LogConfigInFile(const std::string& path) {
auto node = YAML::LoadFile(path);
YAML::Emitter out;
out << node;
SERVER_LOG_DEBUG << "\n\n"
LOG_SERVER_INFO_ << "\n\n"
<< std::string(15, '*') << "Config in file" << std::string(15, '*') << "\n\n"
<< out.c_str();
}
......@@ -107,7 +107,7 @@ LogConfigInMem() {
auto& config = Config::GetInstance();
std::string config_str;
config.GetConfigJsonStr(config_str, 3);
SERVER_LOG_DEBUG << "\n\n"
LOG_SERVER_INFO_ << "\n\n"
<< std::string(15, '*') << "Config in memory" << std::string(15, '*') << "\n\n"
<< config_str;
}
......@@ -117,7 +117,7 @@ LogCpuInfo() {
/*CPU information*/
std::fstream fcpu("/proc/cpuinfo", std::ios::in);
if (!fcpu.is_open()) {
SERVER_LOG_WARNING << "Cannot obtain CPU information. Open file /proc/cpuinfo fail: " << strerror(errno);
LOG_SERVER_WARNING_ << "Cannot obtain CPU information. Open file /proc/cpuinfo fail: " << strerror(errno);
return;
}
std::stringstream cpu_info_ss;
......@@ -127,12 +127,12 @@ LogCpuInfo() {
auto processor_pos = cpu_info.rfind("processor");
if (std::string::npos == processor_pos) {
SERVER_LOG_WARNING << "Cannot obtain CPU information. No sub string \'processor\'";
LOG_SERVER_WARNING_ << "Cannot obtain CPU information. No sub string \'processor\'";
return;
}
auto sub_str = cpu_info.substr(processor_pos);
SERVER_LOG_DEBUG << "\n\n" << std::string(15, '*') << "CPU" << std::string(15, '*') << "\n\n" << sub_str;
LOG_SERVER_INFO_ << "\n\n" << std::string(15, '*') << "CPU" << std::string(15, '*') << "\n\n" << sub_str;
}
} // namespace server
......
......@@ -25,7 +25,7 @@ SignalUtil::HandleSignal(int signum) {
switch (signum) {
case SIGINT:
case SIGUSR2: {
SERVER_LOG_INFO << "Server received signal: " << signum;
LOG_SERVER_INFO_ << "Server received signal: " << signum;
server::Server& server = server::Server::GetInstance();
server.Stop();
......@@ -33,7 +33,7 @@ SignalUtil::HandleSignal(int signum) {
exit(0);
}
default: {
SERVER_LOG_INFO << "Server received critical signal: " << signum;
LOG_SERVER_INFO_ << "Server received critical signal: " << signum;
SignalUtil::PrintStacktrace();
server::Server& server = server::Server::GetInstance();
......@@ -46,7 +46,7 @@ SignalUtil::HandleSignal(int signum) {
void
SignalUtil::PrintStacktrace() {
SERVER_LOG_INFO << "Call stack:";
LOG_SERVER_INFO_ << "Call stack:";
const int size = 32;
void* array[size];
......@@ -54,7 +54,7 @@ SignalUtil::PrintStacktrace() {
char** stacktrace = backtrace_symbols(array, stack_num);
for (int i = 0; i < stack_num; ++i) {
std::string info = stacktrace[i];
SERVER_LOG_INFO << info;
LOG_SERVER_INFO_ << info;
}
free(stacktrace);
}
......
......@@ -40,31 +40,31 @@ TimeRecorder::PrintTimeRecord(const std::string& msg, double span) {
switch (log_level_) {
case 0: {
SERVER_LOG_TRACE << str_log;
LOG_SERVER_TRACE_ << str_log;
break;
}
case 1: {
SERVER_LOG_DEBUG << str_log;
LOG_SERVER_DEBUG_ << str_log;
break;
}
case 2: {
SERVER_LOG_INFO << str_log;
LOG_SERVER_INFO_ << str_log;
break;
}
case 3: {
SERVER_LOG_WARNING << str_log;
LOG_SERVER_WARNING_ << str_log;
break;
}
case 4: {
SERVER_LOG_ERROR << str_log;
LOG_SERVER_ERROR_ << str_log;
break;
}
case 5: {
SERVER_LOG_FATAL << str_log;
LOG_SERVER_FATAL_ << str_log;
break;
}
default: {
SERVER_LOG_INFO << str_log;
LOG_SERVER_INFO_ << str_log;
break;
}
}
......
......@@ -24,7 +24,7 @@ print_timestamp(const std::string& message) {
micros %= 1000000;
double millisecond = (double)micros / 1000.0;
SERVER_LOG_DEBUG << std::fixed << " " << millisecond << "(ms) [timestamp]" << message;
LOG_SERVER_DEBUG_ << std::fixed << " " << millisecond << "(ms) [timestamp]" << message;
}
class TimeRecorder {
......