Slide 45
Slide 45 text
def model_fn(model_dir):
    """Load the model from *model_dir* for serving.

    Called once by the inference container at startup; the returned
    object is later passed to ``predict_fn``.
    """
    loaded_model = Your_Model()  # placeholder: instantiate your own model class
    return loaded_model
def input_fn(input_data, content_type):
    """Deserialize the raw request payload.

    Delegates to the toolkit's default decoder, which picks a
    deserializer based on *content_type*.
    """
    deserialized = decoder.decode(input_data, content_type)
    return deserialized
def predict_fn(input_data, model):
    """Run inference on data already deserialized by ``input_fn``.

    Simply invokes *model* as a callable on *input_data* and returns
    its output.
    """
    prediction = model(input_data)
    return prediction
def output_fn(prediction, content_type):
    """Serialize the result of ``predict_fn`` for the response.

    Delegates to the toolkit's default encoder, which picks a
    serializer based on *content_type*.
    """
    serialized = encoder.encode(prediction, content_type)
    return serialized
https://github.com/aws/sagemaker-inference-toolkit
BYOM Deployment: Inference Handler Script
https://github.com/tensorflow/serving
def input_handler(data, context):
    """Pre-process request input before it is sent to the
    TensorFlow Serving REST API.

    Args:
        data: raw request payload stream/bytes.
        context: request metadata object exposing
            ``request_content_type``.

    Returns:
        None in this slide template — real handlers should return the
        transformed payload for TensorFlow Serving.
    """
    # NOTE: both branches are placeholders in the original slide; the
    # original text had the comment and the first condition broken
    # across lines (a syntax error), fixed here.
    if context.request_content_type == 'application/json':
        # TODO: deserialize the JSON payload here
        pass
    if context.request_content_type == 'text/csv':
        # TODO: parse the CSV payload here
        pass
def output_handler(response, context):
    """Post-process TensorFlow Serving output before it is returned
    to the client.

    Args:
        response: TensorFlow Serving response object exposing
            ``content``.
        context: request metadata object exposing ``accept_header``.

    Returns:
        Tuple of (prediction payload, response content type).
    """
    # Original slide text had the comment continuation on an
    # uncommented line (a syntax error), fixed here.
    response_content_type = context.accept_header
    prediction = response.content
    return prediction, response_content_type