OpenDocCN / pycaret

Commit 9224fe0a (unverified): Add files via upload
Authored on Jan 26, 2020 by pycaret; committed via GitHub on Jan 26, 2020.
Parent: 9ae0623d

Showing 4 changed files with 681 additions and 87 deletions (+681 -87):

    anomaly.py         +139    -6
    classification.py  +191   -38
    clustering.py      +162    -6
    regression.py      +189   -37
anomaly.py (view file @ 9224fe0a)

@@ -2105,8 +2105,10 @@ def save_model(model, model_name, verbose=True):

-def load_model(model_name, verbose=True):
+def load_model(model_name, platform = None, authentication = None, verbose=True):

     """

@@ -2139,6 +2141,28 @@ def load_model(model_name, verbose=True):

     """

     #exception checking
     import sys

+    if platform is not None:
+        if authentication is None:
+            sys.exit("(Value Error): Authentication is missing.")
+
+    #cloud provider
+    if platform == 'aws':
+
+        import boto3
+
+        bucketname = authentication.get('bucket')
+        filename = str(model_name) + '.pkl'
+        s3 = boto3.resource('s3')
+        s3.Bucket(bucketname).download_file(filename, filename)
+        filename = str(model_name)
+        model = load_model(filename, verbose=False)
+
+        if verbose:
+            print('Transformation Pipeline and Model Sucessfully Loaded')
+
+        return model

     import joblib
     model_name = model_name + '.pkl'

@@ -2259,8 +2283,105 @@ def load_experiment(experiment_name):

+def deploy_model(model, model_name, authentication, platform = 'aws'):
+
+    """
+    Description:
+    ------------
+    This function deploys the transformation pipeline and trained model object
+    for production use. Platform of deployment can be defined under platform
+    param along with applicable authentication tokens to be passed as dictionary
+    in authentication param.
+
+    Example:
+    --------
+    from pycaret.datasets import get_data
+    anomaly = get_data('anomaly')
+    experiment_name = setup(data = anomaly, normalize=True)
+    knn = create_model('knn')
+
+    deploy_model(model = knn, model_name = 'deploy_knn', platform = 'aws',
+                 authentication = {'bucket' : 'pycaret-test'})
+
+    This will deploy the model on AWS S3 account under bucket 'pycaret-test'
+
+    For AWS users:
+    --------------
+    Before deploying a model to AWS S3 ('aws'), environment variables must be
+    configured using command line interface. To configure AWS environment variables,
+    type aws configure in your python command line, it requires following information
+    that can be generated using Identity and Access Management (IAM) portal of your
+    amazon console account:
+
+    - AWS Access Key ID
+    - AWS Secret Key Access
+    - Default Region Name (can be seen under Global settings on your AWS console)
+    - Default output format (must be left blank)
+
+    Parameters
+    ----------
+    model : object
+    A trained model object should be passed as an estimator.
+
+    model_name : string
+    Name of model to be passed as a string.
+
+    authentication : dict
+    dictionary of applicable authentication tokens.
+
+    When platform = 'aws':
+    {'bucket' : 'Name of Bucket on S3'}
+
+    platform: string, default = 'aws'
+    Name of platform for deployment. Current available options are: 'aws'.
+
+    Returns:
+    --------
+    Success Message
+
+    Warnings:
+    ---------
+    None
+
+    """
+
+    #general dependencies
+    import ipywidgets as ipw
+    import pandas as pd
+    from IPython.display import clear_output, update_display
+
+    try:
+        model = finalize_model(model)
+    except:
+        pass
+
+    if platform == 'aws':
+
+        import boto3
+
+        save_model(model, model_name = model_name, verbose=False)
+
+        #initiaze s3
+        s3 = boto3.client('s3')
+        filename = str(model_name) + '.pkl'
+        key = str(model_name) + '.pkl'
+        bucket_name = authentication.get('bucket')
+        s3.upload_file(filename, bucket_name, key)
+        clear_output()
+        print("Model Succesfully Deployed on AWS S3")

-def predict_model(model, data):
+def predict_model(model, data, platform = None, authentication = None):

     """

@@ -2281,7 +2402,9 @@ def predict_model(model,

     Parameters
     ----------
-    model : object, default = None
+    model : object / string, default = None
+    When model is passed as string, load_model() is called internally to load the
+    pickle file from active directory or cloud platform when platform param is passed.

     data : {array-like, sparse matrix}, shape (n_samples, n_features) where n_samples
     is the number of samples and n_features is the number of features. All features

@@ -2305,6 +2428,7 @@ def predict_model(model,

     #no active tests

     #general dependencies
     from IPython.display import clear_output, update_display
     import numpy as np
     import pandas as pd
     import re

@@ -2315,10 +2439,17 @@ def predict_model(model,

     #copy data and model
     data__ = data.copy()
     model_ = deepcopy(model)
     clear_output()

     #check if estimator is string, then load model
     if type(model) is str:
-        model_ = load_model(model, verbose=False)
+        if platform == 'aws':
+            model_ = load_model(str(model), platform = 'aws',
+                                authentication = {'bucket' : authentication.get('bucket')},
+                                verbose=False)
+        else:
+            model_ = load_model(str(model), verbose=False)

     #separate prep_data pipeline
     if type(model_) is list:

@@ -2341,8 +2472,10 @@ def predict_model(model,

     #predictions start here
     _data_ = prep_pipe_transformer.transform(data__)
     pred = model.predict(_data_)
     pred_score = model.decision_scores_

     data__['Label'] = pred
     data__['Score'] = pred_score

     return data__
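Taken together, the anomaly.py changes support a full cloud round trip: deploy_model() uploads the saved pipeline-plus-model pickle, and predict_model() can then score new data from the model's name alone. A minimal sketch following the docstring example (the 'pycaret-test' bucket is illustrative and assumes AWS credentials were already set up with aws configure):

    from pycaret.datasets import get_data
    from pycaret.anomaly import setup, create_model, deploy_model, predict_model

    anomaly = get_data('anomaly')
    exp = setup(data = anomaly, normalize = True)
    knn = create_model('knn')

    # save_model() writes deploy_knn.pkl locally, then boto3 uploads it to S3
    deploy_model(model = knn, model_name = 'deploy_knn', platform = 'aws',
                 authentication = {'bucket' : 'pycaret-test'})

    # passing the model as a string makes predict_model() call load_model()
    # internally, fetching deploy_knn.pkl from S3 before scoring
    preds = predict_model(model = 'deploy_knn', data = anomaly,
                          platform = 'aws',
                          authentication = {'bucket' : 'pycaret-test'})
    preds[['Label', 'Score']].head()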
classification.py (view file @ 9224fe0a)

@@ -4623,7 +4623,7 @@ def stack_models(estimator_list,

     #defining data_X and data_y
     if finalize:
-        data_X = X_.copy()
+        data_X = X.copy()
         data_y = y.copy()
     else:
         data_X = X_train.copy()

@@ -6350,7 +6350,11 @@ def save_model(model, model_name, verbose=True):

     print('Transformation Pipeline and Model Succesfully Saved')

-def load_model(model_name, verbose=True):
+def load_model(model_name, platform = None, authentication = None, verbose=True):

     """

@@ -6371,6 +6375,16 @@ def load_model(model_name, verbose=True):

     ----------
     model_name : string, default = none
     Name of pickle file to be passed as a string.

+    platform: string, default = None
+    Name of platform, if loading model from cloud. Current available options are:
+    'aws'.
+
+    authentication : dict
+    dictionary of applicable authentication tokens.
+
+    When platform = 'aws':
+    {'bucket' : 'Name of Bucket on S3'}

     verbose: Boolean, default = True
     Success message is not printed when verbose is set to False.

@@ -6385,8 +6399,29 @@ def load_model(model_name, verbose=True):

     """

     #exception checking
     import sys

+    [... same platform / authentication guard and 'aws' download block as added
+     to load_model in anomaly.py above ...]

     import joblib
     model_name = model_name + '.pkl'

     if verbose:

@@ -6504,8 +6539,103 @@ def load_experiment(experiment_name):

+def deploy_model(model, model_name, authentication, platform = 'aws'):
+
+    """
+    Description:
+    ------------
+    This function deploys the transformation pipeline and trained model object
+    for production use. Platform of deployment can be defined under platform
+    param along with applicable authentication tokens to be passed as dictionary
+    in authentication param.
+
+    Example:
+    --------
+    from pycaret.datasets import get_data
+    juice = get_data('juice')
+    experiment_name = setup(data = juice, target = 'Purchase')
+    lr = create_model('lr')
+
+    deploy_model(model = lr, model_name = 'deploy_lr', platform = 'aws',
+                 authentication = {'bucket' : 'pycaret-test'})
+
+    This will deploy the model on AWS S3 account under bucket 'pycaret-test'
+
+    [... AWS notes, Parameters, Returns, Warnings and function body identical to
+     deploy_model in anomaly.py above ...]

-def predict_model(estimator, data=None):
+def predict_model(estimator, data=None, platform = None, authentication = None):

     """

@@ -6529,12 +6659,24 @@ def predict_model(estimator,

     Parameters
     ----------
-    estimator : object or list of objects, default = None
+    estimator : object or list of objects / string, default = None
+    When estimator is passed as string, load_model() is called internally to load the
+    pickle file from active directory or cloud platform when platform param is passed.

     data : {array-like, sparse matrix}, shape (n_samples, n_features) where n_samples
     is the number of samples and n_features is the number of features. All features
     used during training must be present in the new dataset.

+    platform: string, default = None
+    Name of platform, if loading model from cloud. Current available options are:
+    'aws'.
+
+    authentication : dict
+    dictionary of applicable authentication tokens.
+
+    When platform = 'aws':
+    {'bucket' : 'Name of Bucket on S3'}

     Returns:
     --------

@@ -6564,12 +6706,39 @@ def predict_model(estimator,

     from copy import deepcopy
     from IPython.display import clear_output, update_display

+    if type(estimator) is str:
+        if platform == 'aws':
+            estimator = load_model(str(estimator), platform = 'aws',
+                                   authentication = {'bucket' : authentication.get('bucket')},
+                                   verbose=False)
+        else:
+            estimator = load_model(str(estimator), verbose=False)

     estimator = deepcopy(estimator)
+    estimator_ = estimator
     clear_output()

-    #check if estimator is string, then load model
-    if type(estimator) is str:
-        estimator = load_model(estimator, verbose=False)

+    if type(estimator_) is list:
+        if 'sklearn.pipeline.Pipeline' in str(type(estimator_[0])):
+            prep_pipe_transformer = estimator_.pop(0)
+            model = estimator_[0]
+            estimator = estimator_[0]
+        else:
+            prep_pipe_transformer = prep_pipe
+            model = estimator
+            estimator = estimator
+    else:
+        prep_pipe_transformer = prep_pipe
+        model = estimator
+        estimator = estimator

     #dataset
     if data is None:

@@ -6584,42 +6753,25 @@ def predict_model(estimator,

         X_test_.reset_index(drop=True, inplace=True)
         y_test_.reset_index(drop=True, inplace=True)

+        model = estimator

     else:

-        estimator_ = estimator
-
-        if type(estimator_) is list:
-            if 'sklearn.pipeline.Pipeline' in str(type(estimator_[0])):
-                prep_pipe_transformer = estimator_.pop(0)
-                model = estimator_[0]
-                estimator = estimator_[0]
-            else:
-                prep_pipe_transformer = prep_pipe
-                model = estimator
-                estimator = estimator
-        else:
-            prep_pipe_transformer = prep_pipe
-            model = estimator
-            estimator = estimator
-
-        try:
-            model = finalize_model(estimator)
-        except:
-            model = estimator

         Xtest = prep_pipe_transformer.transform(data)
         X_test_ = data.copy() #original concater

         Xtest.reset_index(drop=True, inplace=True)
         X_test_.reset_index(drop=True, inplace=True)

+        estimator_ = estimator
+
+        #try:
+        #    model = finalize_model(estimator)
+        #except:
+        #    model = estimator

     if type(estimator) is list:

@@ -7044,4 +7196,5 @@ def predict_model(estimator,

         pass

-    return X_test_
\ No newline at end of file
+    return X_test_
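The cloud branch added to load_model() in all four modules is a thin boto3 wrapper: it downloads <model_name>.pkl from the named bucket into the working directory, then recurses into the local code path, which ends in joblib.load. Roughly equivalent, as a sketch (bucket and file names illustrative):

    import boto3
    import joblib

    bucket = 'pycaret-test'   # what authentication.get('bucket') returns
    s3 = boto3.resource('s3')
    s3.Bucket(bucket).download_file('deploy_lr.pkl', 'deploy_lr.pkl')

    # the recursive load_model(filename, verbose=False) call reduces to this
    lr_pipeline = joblib.load('deploy_lr.pkl')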
clustering.py (view file @ 9224fe0a)

@@ -2181,7 +2181,10 @@ def save_model(model, model_name, verbose=True):

-def load_model(model_name, verbose=True):
+def load_model(model_name, platform = None, authentication = None, verbose=True):

     """

@@ -2203,6 +2206,19 @@ def load_model(model_name, verbose=True):

     model_name : string, default = none
     Name of pickle file to be passed as a string.

+    platform: string, default = None
+    Name of platform, if loading model from cloud. Current available options are:
+    'aws'.
+
+    authentication : dict
+    dictionary of applicable authentication tokens.
+
+    When platform = 'aws':
+    {'bucket' : 'Name of Bucket on S3'}

     verbose: Boolean, default = True
     Success message is not printed when verbose is set to False.

     Returns:
     --------
     Success Message

@@ -2214,7 +2230,29 @@ def load_model(model_name, verbose=True):

     """

     #exception checking
     import sys

+    [... same platform / authentication guard and 'aws' download block as added
+     to load_model in anomaly.py above ...]

     import joblib
     model_name = model_name + '.pkl'

     if verbose:

@@ -2223,6 +2261,7 @@ def load_model(model_name, verbose=True):

 def save_experiment(experiment_name = None):

@@ -2331,8 +2370,104 @@ def load_experiment(experiment_name):

     return exp

+def deploy_model(model, model_name, authentication, platform = 'aws'):
+
+    """
+    Description:
+    ------------
+    This function deploys the transformation pipeline and trained model object
+    for production use. Platform of deployment can be defined under platform
+    param along with applicable authentication tokens to be passed as dictionary
+    in authentication param.
+
+    Example:
+    --------
+    from pycaret.datasets import get_data
+    jewellery = get_data('jewellery')
+    experiment_name = setup(data = jewellery, normalize = True)
+    kmeans = create_model('kmeans')
+
+    deploy_model(model = kmeans, model_name = 'deploy_kmeans', platform = 'aws',
+                 authentication = {'bucket' : 'pycaret-test'})
+
+    This will deploy the model on AWS S3 account under bucket 'pycaret-test'
+
+    [... AWS notes, Parameters, Returns, Warnings and function body identical to
+     deploy_model in anomaly.py above ...]

-def predict_model(model, data):
+def predict_model(model, data, platform = None, authentication = None):

     """

@@ -2353,12 +2488,24 @@ def predict_model(model,

     Parameters
     ----------
-    model : object, default = None
+    model : object / string, default = None
+    When model is passed as string, load_model() is called internally to load the
+    pickle file from active directory or cloud platform when platform param is passed.

     data : {array-like, sparse matrix}, shape (n_samples, n_features) where n_samples
     is the number of samples and n_features is the number of features. All features
     used during training must be present in the new dataset.

+    platform: string, default = None
+    Name of platform, if loading model from cloud. Current available options are:
+    'aws'.
+
+    authentication : dict
+    dictionary of applicable authentication tokens.
+
+    When platform = 'aws':
+    {'bucket' : 'Name of Bucket on S3'}

     Returns:
     --------

@@ -2377,6 +2524,7 @@ def predict_model(model,

     #no active tests

     #general dependencies
     from IPython.display import clear_output, update_display
     import numpy as np
     import pandas as pd
     import re

@@ -2387,11 +2535,18 @@ def predict_model(model,

     #copy data and model
     data__ = data.copy()
     model_ = deepcopy(model)
     clear_output()

     #check if estimator is string, then load model
     if type(model) is str:
-        model_ = load_model(model, verbose=False)
+        if platform == 'aws':
+            model_ = load_model(str(model), platform = 'aws',
+                                authentication = {'bucket' : authentication.get('bucket')},
+                                verbose=False)
+        else:
+            model_ = load_model(str(model), verbose=False)

     #separate prep_data pipeline
     if type(model_) is list:
         prep_pipe_transformer = model_[0]

@@ -2425,6 +2580,7 @@ def predict_model(model,

     return data__

 def get_clusters(data,
                  model = None,
                  num_clusters = 4,
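clustering.py's predict_model() gains the same two loading paths for string models. A sketch of both (the bucket name is illustrative; deploy_model() also leaves deploy_kmeans.pkl in the working directory, which is what the local path reads):

    from pycaret.datasets import get_data
    from pycaret.clustering import setup, create_model, deploy_model, predict_model

    jewellery = get_data('jewellery')
    exp = setup(data = jewellery, normalize = True)
    kmeans = create_model('kmeans')
    deploy_model(model = kmeans, model_name = 'deploy_kmeans', platform = 'aws',
                 authentication = {'bucket' : 'pycaret-test'})

    # no platform: load_model() reads deploy_kmeans.pkl from the active directory
    labels_local = predict_model(model = 'deploy_kmeans', data = jewellery)

    # platform = 'aws': the pickle is first downloaded from the S3 bucket
    labels_cloud = predict_model(model = 'deploy_kmeans', data = jewellery,
                                 platform = 'aws',
                                 authentication = {'bucket' : 'pycaret-test'})

Note the guard added to load_model(): setting a platform without authentication aborts via sys.exit("(Value Error): Authentication is missing.").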
regression.py (view file @ 9224fe0a)

@@ -5350,7 +5350,11 @@ def save_model(model, model_name, verbose=True):

     print('Transformation Pipeline and Model Succesfully Saved')

-def load_model(model_name, verbose=True):
+def load_model(model_name, platform = None, authentication = None, verbose=True):

     """

@@ -5372,6 +5376,16 @@ def load_model(model_name, verbose=True):

     model_name : string, default = none
     Name of pickle file to be passed as a string.

+    platform: string, default = None
+    Name of platform, if loading model from cloud. Current available options are:
+    'aws'.
+
+    authentication : dict
+    dictionary of applicable authentication tokens.
+
+    When platform = 'aws':
+    {'bucket' : 'Name of Bucket on S3'}

     verbose: Boolean, default = True
     Success message is not printed when verbose is set to False.

@@ -5383,11 +5397,33 @@ def load_model(model_name, verbose=True):

     ---------
     None

     """

     #exception checking
     import sys

+    [... same platform / authentication guard and 'aws' download block as added
+     to load_model in anomaly.py above ...]

     import joblib
     model_name = model_name + '.pkl'

     if verbose:

@@ -5395,6 +5431,7 @@ def load_model(model_name, verbose=True):

     return joblib.load(model_name)

 def save_experiment(experiment_name = None):

@@ -5505,8 +5542,103 @@ def load_experiment(experiment_name):

+def deploy_model(model, model_name, authentication, platform = 'aws'):
+
+    """
+    Description:
+    ------------
+    This function deploys the transformation pipeline and trained model object
+    for production use. Platform of deployment can be defined under platform
+    param along with applicable authentication tokens to be passed as dictionary
+    in authentication param.
+
+    Example:
+    --------
+    from pycaret.datasets import get_data
+    boston = get_data('boston')
+    experiment_name = setup(data = boston, target = 'medv')
+    lr = create_model('lr')
+
+    deploy_model(model = lr, model_name = 'deploy_lr', platform = 'aws',
+                 authentication = {'bucket' : 'pycaret-test'})
+
+    This will deploy the model on AWS S3 account under bucket 'pycaret-test'
+
+    [... AWS notes, Parameters, Returns, Warnings and function body identical to
+     deploy_model in anomaly.py above ...]

-def predict_model(estimator, data = None, round = 4):
+def predict_model(estimator, data = None, platform = None, authentication = None, round = 4):

     """

@@ -5531,12 +5663,24 @@ def predict_model(estimator,

     Parameters
     ----------
-    estimator : object or list of objects, default = None
+    estimator : object or list of objects / string, default = None
+    When estimator is passed as string, load_model() is called internally to load the
+    pickle file from active directory or cloud platform when platform param is passed.

     data : {array-like, sparse matrix}, shape (n_samples, n_features) where n_samples
     is the number of samples and n_features is the number of features. All features
     used during training must be present in the new dataset.

+    platform: string, default = None
+    Name of platform, if loading model from cloud. Current available options are:
+    'aws'.
+
+    authentication : dict
+    dictionary of applicable authentication tokens.
+
+    When platform = 'aws':
+    {'bucket' : 'Name of Bucket on S3'}

     round: integer, default = 4
     Number of decimal places the predicted labels will be rounded to.

@@ -5569,13 +5713,39 @@ def predict_model(estimator,

     from copy import deepcopy
     from IPython.display import clear_output, update_display

+    [... same string / platform loading and up-front pipeline separation block
+     as added to predict_model in classification.py above ...]

     #dataset
     if data is None:

@@ -5590,43 +5760,24 @@ def predict_model(estimator,

     [... same restructuring of the else branch as in predict_model in
     classification.py above: the estimator_ list handling and the
     finalize_model() fallback are removed, and the finalize_model() call is
     left commented out after the Xtest / X_test_ reset_index calls ...]

     if type(estimator) is list:
         if type(estimator[0]) is list:

@@ -5925,3 +6076,4 @@ def predict_model(estimator,

     X_test_ = pd.concat([X_test_, label], axis=1)

     return X_test_
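regression.py keeps its round parameter alongside the new cloud options, so predictions can be pulled from S3 and rounded in a single call. A sketch (bucket illustrative; assumes a model was deployed as 'deploy_lr' per the docstring example):

    from pycaret.datasets import get_data
    from pycaret.regression import predict_model

    boston = get_data('boston')

    # downloads deploy_lr.pkl, runs boston through the saved pipeline, and
    # appends the predicted label column rounded to 2 decimal places
    preds = predict_model(estimator = 'deploy_lr', data = boston,
                          platform = 'aws',
                          authentication = {'bucket' : 'pycaret-test'},
                          round = 2)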