diff --git a/oneflow/core/job/scheduler.cpp b/oneflow/core/job/scheduler.cpp
index 6bcd371c64a06634c457160834ac61859c2ff5c5..406d6645d02c7f209979593eef00c55a56ce53d8 100644
--- a/oneflow/core/job/scheduler.cpp
+++ b/oneflow/core/job/scheduler.cpp
@@ -36,11 +36,6 @@ void Scheduler::Process(const std::string& job_conf_filepath,
   NewAllSingleton(job_conf_filepath, this_machine_name, env);
   auto plan = of_make_unique();
   std::string naive_plan_filepath = JoinPath(LogDir(), "naive_plan");
-  //#ifdef PLATFORM_WINDOWS
-  //  char current_dir[128];
-  //  GetCurrentDirectory(100, current_dir);
-  //  LOG(INFO) << "current_dir is " << current_dir << "\n";
-  //#endif  // PLATFORM_WINDOWS
   // Compile
   if (RuntimeCtx::Singleton()->IsThisMachineMaster()) {
     std::stringstream compile_cmd;
diff --git a/oneflow/core/persistence/file_system.cpp b/oneflow/core/persistence/file_system.cpp
index d831deed08e31cc3d5fd8fbd256ddeff71f46ac3..09246c55af367b3f626218ed01e71ee1d90228b7 100644
--- a/oneflow/core/persistence/file_system.cpp
+++ b/oneflow/core/persistence/file_system.cpp
@@ -94,7 +94,7 @@ struct GlobalFSConstructor {
     const GlobalFSConf& gfs_conf =
         JobDesc::Singleton()->job_conf().global_fs_conf();
     if (gfs_conf.has_localfs_conf()) {
-      // CHECK_EQ(JobDesc::Singleton()->resource().machine().size(), 1);
+      CHECK_EQ(JobDesc::Singleton()->resource().machine().size(), 1);
       gfs = LocalFS();
     } else if (gfs_conf.has_hdfs_conf()) {
       gfs = new HadoopFileSystem(gfs_conf.hdfs_conf());