# Convert a pretrained TensorFlow Keras Xception model to Core ML.
import coremltools as ct
import tensorflow as tf  # tested with TF 2.2.0

# Load Xception with ImageNet weights at its standard 299x299 RGB input.
tf_model = tf.keras.applications.Xception(
    weights="imagenet",
    input_shape=(299, 299, 3),
)

# Convert with the coremltools Unified Conversion API and save to disk.
# NOTE: removed a stray bare `convert()` call that raised NameError at runtime.
model_from_tf = ct.convert(tf_model)
model_from_tf.save("imagenet.mlmodel")
import tensorflow as tf
import coremltools as ct

# Build a small fully-connected MNIST-style classifier with tf.keras.
tf_keras_model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
])

# Option 1: hand the in-memory tf.keras.Model straight to the
# Unified Conversion API.
mlmodel = ct.convert(tf_keras_model)

# Option 2: export to a SavedModel directory, then convert from the path.
tf_keras_model.save('tf_keras_model')
mlmodel = ct.convert('tf_keras_model')

# Option 3: reload the SavedModel first, then convert the loaded object.
tf_keras_model = tf.keras.models.load_model('tf_keras_model')
mlmodel = ct.convert(tf_keras_model)

# Option 4: export to HDF5 format, then convert from the .h5 path.
tf_keras_model.save('tf_keras_model.h5')
mlmodel = ct.convert('tf_keras_model.h5')

# Persist the (last) converted model to disk.
mlmodel.save("trainedmodel.mlmodel")
// Load the Core ML face-parsing model and run it through Vision.
guard let model = try? VNCoreMLModel(for: FaceParsing().model) else {
    fatalError("Loading CoreML Model Failed.")
}

// Build the request BEFORE performing it — the original called
// `handler.perform([request])` before `request` was declared.
let request = VNCoreMLRequest(model: model) { request, error in
    // Pull the segmentation map out of the first feature-value observation.
    guard let results = request.results as? [VNCoreMLFeatureValueObservation],
          let segmentationmap = results.first?.featureValue.multiArrayValue,
          let row = segmentationmap.shape[0] as? Int,
          let col = segmentationmap.shape[1] as? Int else {
        fatalError("Model failed to process images.")
    }
    self.model_results = results
    self.model_segmentationmap = segmentationmap
}

// Avoid the force cast (`as!`): fail gracefully when the input image
// is not a CIImage instead of crashing.
guard let ciImage = inputImg as? CIImage else {
    print("error: input image is not a CIImage")
    return
}
let handler = VNImageRequestHandler(ciImage: ciImage)
do {
    // Use `try` (not `try!`) so the catch clause is actually reachable.
    try handler.perform([request])
} catch {
    print("error: \(error)")
}
Actual working code (request is declared before it is performed):
// Load the Core ML face-parsing model and run it through Vision.
guard let model = try? VNCoreMLModel(for: FaceParsing().model) else {
    fatalError("Loading CoreML Model Failed.")
}

// The completion handler extracts the segmentation map from the first
// feature-value observation and stores it on self.
let request = VNCoreMLRequest(model: model) { request, error in
    guard let results = request.results as? [VNCoreMLFeatureValueObservation],
          let segmentationmap = results.first?.featureValue.multiArrayValue,
          let row = segmentationmap.shape[0] as? Int,
          let col = segmentationmap.shape[1] as? Int else {
        fatalError("Model failed to process images.")
    }
    self.model_results = results
    self.model_segmentationmap = segmentationmap
}

// Avoid the force cast (`as!`): fail gracefully when the input image
// is not a CIImage instead of crashing.
guard let ciImage = inputImg as? CIImage else {
    print("error: input image is not a CIImage")
    return
}
let handler = VNImageRequestHandler(ciImage: ciImage)
do {
    // `try!` inside do/catch made the catch unreachable; use `try` and
    // surface the actual error instead of a bare "error" string.
    try handler.perform([request])
} catch {
    print("error: \(error)")
}
References:
https://pilgwon.github.io/blog/2017/09/18/Smart-Gesture-Recognition-CoreML-TensorFlow.html
https://medium.com/@JMangia/swift-loves-tensorflow-and-coreml-2a11da25d44