From 92bb789628b142d5d3133062d3e0afa7a02abcca Mon Sep 17 00:00:00 2001
From: Berkin Ilbeyi
Date: Fri, 30 Oct 2020 18:12:13 -0700
Subject: [PATCH] [XLA] Don't unnecessarily process buffers that got default
 mem allocations.

When processing buffers that are placed in tuples, we create (possibly
redundant) additional tuple and gte instructions, which we later simplify.
We don't need to create these additional instructions for buffers that were
unchanged after memory space assignment. We were facing compile time and OOM
issues without this change.

PiperOrigin-RevId: 339972236
Change-Id: Ieb82767ed52d39260141f3e3179387fbf5783920
---
 tensorflow/compiler/xla/service/memory_space_assignment.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tensorflow/compiler/xla/service/memory_space_assignment.cc b/tensorflow/compiler/xla/service/memory_space_assignment.cc
index e424daa56b2..ab0a87aae99 100644
--- a/tensorflow/compiler/xla/service/memory_space_assignment.cc
+++ b/tensorflow/compiler/xla/service/memory_space_assignment.cc
@@ -2729,6 +2729,12 @@ void MemorySpaceAssignment::Allocation::AddUse(HloUse use) {
 
 Status MemorySpaceAssignment::Allocation::Process(
     MemorySpaceAssignment* memory_space_assignment) {
+  // No need to do anything if this is an allocation in the default memory
+  // space. The calls below may insert redundant tuple/get-tuple-element
+  // instructions, unnecessarily increasing memory and compile time.
+  if (memory_space() == MemorySpace::kDefault) {
+    return Status::OK();
+  }
   HloInstruction* producing_instruction = AddGetTupleElements();
   HloComputation* computation = producing_instruction->parent();
   for (const HloUse& use : uses_) {
--
GitLab