Creating a Request with VNCoreMLRequest
First of all, set resultLabel.text to "I'm investigating..." so the user can see that recognition is in progress.
func recognizeImage(image: CIImage) {
    resultLabel.text = "I'm investigating..."
}
Next, wrap the GoogLeNetPlaces Core ML model in a VNCoreMLModel and assign the result to the variable model. Because the initializer can throw, it is called with try? inside an if let.
func recognizeImage(image: CIImage) {
    resultLabel.text = "I'm investigating..."
    if let model = try? VNCoreMLModel(for: GoogLeNetPlaces().model) {
    }
}
Create the request with the VNCoreMLRequest initializer, passing the model we created above as its model parameter together with a completion handler.
func recognizeImage(image: CIImage) {
    resultLabel.text = "I'm investigating..."
    if let model = try? VNCoreMLModel(for: GoogLeNetPlaces().model) {
        let request = VNCoreMLRequest(model: model, completionHandler: { (vnrequest, error) in
        })
    }
}
Inside the completion handler, cast the request's results to [VNClassificationObservation] and assign them to results. Each observation carries an identifier (the predicted label) and a confidence value.
func recognizeImage(image: CIImage) {
    resultLabel.text = "I'm investigating..."
    if let model = try? VNCoreMLModel(for: GoogLeNetPlaces().model) {
        let request = VNCoreMLRequest(model: model, completionHandler: { (vnrequest, error) in
            if let results = vnrequest.results as? [VNClassificationObservation] {
            }
        })
    }
}
Assign the first result, which has the highest confidence, to topResult, unwrapping it safely in the same if let.
func recognizeImage(image: CIImage) {
    resultLabel.text = "I'm investigating..."
    if let model = try? VNCoreMLModel(for: GoogLeNetPlaces().model) {
        let request = VNCoreMLRequest(model: model, completionHandler: { (vnrequest, error) in
            if let results = vnrequest.results as? [VNClassificationObservation],
               let topResult = results.first {
            }
        })
    }
}
Use DispatchQueue.main.async to switch back to the main thread: the completion handler may run on a background thread, and UI updates such as changing the label must happen on the main thread.
func recognizeImage(image: CIImage) {
    resultLabel.text = "I'm investigating..."
    if let model = try? VNCoreMLModel(for: GoogLeNetPlaces().model) {
        let request = VNCoreMLRequest(model: model, completionHandler: { (vnrequest, error) in
            if let results = vnrequest.results as? [VNClassificationObservation],
               let topResult = results.first {
                DispatchQueue.main.async {
                }
            }
        })
    }
}
Finally, convert topResult's confidence into a percentage, assign it to confidenceRate, and display it together with the predicted identifier in resultLabel.text.
// Requires `import Vision` at the top of the file; GoogLeNetPlaces is the
// class Xcode generates when the Core ML model is added to the project.
func recognizeImage(image: CIImage) {
    resultLabel.text = "I'm investigating..."
    // Wrap the Core ML model so the Vision framework can drive it.
    if let model = try? VNCoreMLModel(for: GoogLeNetPlaces().model) {
        let request = VNCoreMLRequest(model: model, completionHandler: { (vnrequest, error) in
            // The first classification observation is the one with the highest confidence.
            if let results = vnrequest.results as? [VNClassificationObservation],
               let topResult = results.first {
                // UI updates must happen on the main thread.
                DispatchQueue.main.async {
                    let confidenceRate = topResult.confidence * 100
                    self.resultLabel.text = "\(confidenceRate)% it's \(topResult.identifier)"
                }
            }
        })
    }
}
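The request is now built, but nothing in this section actually runs it. As a minimal sketch of the likely next step, assuming the standard Vision flow, the incoming CIImage would be handed to a VNImageRequestHandler and the request performed on a background queue. The handler name and the do/catch error handling below are assumptions, and the snippet belongs inside the if let model block, right after the request is created:

// Sketch only: executes the request we configured above.
let handler = VNImageRequestHandler(ciImage: image)
DispatchQueue.global(qos: .userInitiated).async {
    do {
        // Runs the classification; the completion handler above fires when it finishes.
        try handler.perform([request])
    } catch {
        print("Vision request failed: \(error)")
    }
}

Performing the request off the main thread keeps the UI responsive while the model runs. Once this is in place, recognizeImage(image:) can be called with any CIImage, for example one created from a UIImage via CIImage(image:).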