提交 0ba63475 编写于 作者: L liaogang

ENH: Add buddy allocator Free

上级 379434b2
# Memory detail libraries. The GPU build compiles system_allocator with nvcc
# and links gpu_info (needed for device queries); the CPU build uses plain cc.
# NOTE: each target may be defined only once -- CMake aborts on a duplicate
# add_library/add_executable name.
if(${WITH_GPU})
  nv_library(system_allocator SRCS system_allocator.cc DEPS gflags gpu_info)
  nv_test(system_allocator_test
          SRCS system_allocator_test.cc
          DEPS system_allocator gpu_info gflags)
else(${WITH_GPU})
  cc_library(system_allocator SRCS system_allocator.cc DEPS gflags)
  cc_test(system_allocator_test SRCS system_allocator_test.cc
          DEPS system_allocator gflags)
endif(${WITH_GPU})

# Block metadata (size/index/buddy bookkeeping) used by the buddy allocator.
cc_library(meta_data SRCS meta_data.cc)

cc_library(buddy_allocator SRCS buddy_allocator.cc)
......@@ -75,10 +75,49 @@ void* BuddyAllocator::Alloc(size_t unaligned_size) {
}
// Returns a chunk previously handed out by Alloc() to the allocator.
// Huge chunks are released straight back to the system allocator; regular
// chunks are marked free and re-inserted into the free-block pool.
// `p` must be a pointer obtained from this allocator's Alloc().
void BuddyAllocator::Free(void* p) {
// Point back to metadata: `p` is the user payload, metadata() recovers the
// MemoryBlock header describing this allocation.
auto block = static_cast<MemoryBlock*>(p)->metadata();
// Acquire the allocator lock for the whole deallocation path
std::lock_guard<std::mutex> lock(mutex_);
DLOG(INFO) << "Free from address " << block;
// Oversized chunks bypass the buddy pool and came directly from the system
// allocator, so hand them straight back.
if (block->type(cache_) == MemoryBlock::HUGE_CHUNK) {
DLOG(INFO) << "Free directly from system allocator";
system_allocator_->Free(block, block->total_size(cache_),
block->index(cache_));
// Invalidate GPU allocation from cache
if (system_allocator_->UseGpu()) {
cache_.erase(block);
}
return;
}
block->mark_as_free(cache_);
// Accounting: the chunk moves from the used total to the free total.
total_used_ -= block->total_size(cache_);
total_free_ += block->total_size(cache_);
// Trying to merge the right buddy
// NOTE(review): only logs for now -- actual coalescing with the right buddy
// is not implemented in this revision.
if (block->has_right_buddy(cache_)) {
DLOG(INFO) << "Merging this block " << block << " with its right buddy "
<< block->right_buddy(cache_);
}
// Trying to merge the left buddy
// NOTE(review): same as above -- the merge is logged but not performed.
if (block->has_left_buddy(cache_)) {
DLOG(INFO) << "Merging this block " << block << " with its left buddy "
<< block->left_buddy(cache_);
}
// Dumping this block into pool so future Alloc() calls can reuse it.
DLOG(INFO) << "Inserting free block (" << block << ", "
<< block->total_size(cache_) << ")";
pool_.insert({block->index(cache_), block->total_size(cache_), block});
// TODO(gangliao): Clean up if existing too much free memory
}
void* BuddyAllocator::SystemAlloc(size_t size) {
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/memory/detail/metadata.h"
#include "paddle/memory/detail/meta_data.h"
#include "paddle/memory/detail/system_allocator.h"
#include "paddle/platform/assert.h"
#include "paddle/platform/cpu_info.h"
......
......@@ -14,7 +14,7 @@
#pragma once
#include "paddle/memory/detail/metadata.h"
#include "paddle/memory/detail/meta_data.h"
#include <cstddef>
#include <unordered_map>
......
......@@ -12,7 +12,7 @@
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/memory/detail/metadata.h"
#include "paddle/memory/detail/meta_data.h"
#include <functional>
......
......@@ -28,15 +28,5 @@ size_t CpuMinChunkSize();
//! Get the maximum chunk size for buddy allocator.
size_t CpuMaxChunkSize();
// Returns the id of the CUDA device currently active on the calling thread.
// Propagates any CUDA runtime failure through throw_on_error.
int GetCurrentDeviceId(void) {
  int current_id = -1;
  const auto status = cudaGetDevice(&current_id);
  throw_on_error(status, "cudaGetDevice failed");
  return current_id;
}
// Makes `device_id` the active CUDA device for subsequent runtime calls on
// this thread; throws via throw_on_error on failure.
void SetDeviceId(int device_id) {
  const auto status = cudaSetDevice(device_id);
  throw_on_error(status, "cudaSetDevice failed");
}
} // namespace platform
} // namespace paddle
......@@ -31,6 +31,19 @@ int GpuDeviceCount() {
return count;
}
// Returns the CUDA device id that is currently active on this thread.
// A failing cudaGetDevice call is surfaced through throw_on_error with a
// message that names this function for easier debugging.
int GetCurrentDeviceId() {
  int current_id = -1;
  const auto status = cudaGetDevice(&current_id);
  throw_on_error(
      status,
      "cudaGetDevice failed in paddle::platform::GetCurrentDeviceId");
  return current_id;
}
// Selects device `id` as the active CUDA device for this thread; any CUDA
// runtime error is reported through throw_on_error.
void SetDeviceId(int id) {
  const auto status = cudaSetDevice(id);
  throw_on_error(status,
                 "cudaSetDevice failed in paddle::platform::SetDeviceId");
}
void GpuMemoryUsage(size_t& available, size_t& total) {
throw_on_error(cudaMemGetInfo(&available, &total),
"cudaMemGetInfo failed in paddle::platform::GetMemoryUsage");
......
......@@ -24,6 +24,12 @@ namespace platform {
//! Get the total number of GPU devices in system.
int GpuDeviceCount();
//! Get the current GPU device id in system.
int GetCurrentDeviceId();
//! Set the GPU device id for next execution.
void SetDeviceId(int device_id);
//! Get the memory usage of the current GPU device.
void GpuMemoryUsage(size_t& available, size_t& total);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册