import SwiftUI
import CoreML
import CoreImage
import CoreVideo
#if os(macOS)
import AppKit
#else
import UIKit
#endif

/// Loads the MNIST Core ML model once and exposes a simple prediction API.
class ModelProvider {
   static let shared = ModelProvider()
   let model: MNISTClassifier?
   // Names of the sample digit images in the asset catalog.
   let images = ["img_0", "img_1", "img_2", "img_3", "img_4", "img_5", "img_6", "img_7", "img_8", "img_9"]

   private init() {
      // If loading fails, `model` stays nil and prediction(image:) falls back to "Not Sure".
      let config = MLModelConfiguration()
      self.model = try? MNISTClassifier(configuration: config)
   }

   /// Classifies the named bundled image and returns the predicted digit as a string,
   /// or "Not Sure" if the model is unavailable or prediction fails.
   func prediction(image: String) -> String {
      var className = "Not Sure"
      if let model = model, let input = convertImage(image: image) {
         do {
            let prediction = try model.prediction(image: input)
            className = String(prediction.classLabel)
         } catch {
            print(error)
         }
      }
      return className
   }

   /// Loads a bundled image by name and converts it to the 28x28 grayscale
   /// pixel buffer the MNIST classifier expects as input.
   func convertImage(image: String) -> CVPixelBuffer? {
      #if os(macOS)
      guard let nsImage = NSImage(named: image),
            let cgImage = nsImage.cgImage(forProposedRect: nil, context: nil, hints: nil) else {
         return nil
      }
      #else
      guard let uiImage = UIImage(named: image), let cgImage = uiImage.cgImage else {
         return nil
      }
      #endif
      
      let ciImage = CIImage(cgImage: cgImage)
      let width = 28
      let height = 28

      // Create an empty single-channel (grayscale) 28x28 pixel buffer.
      var pixelBuffer: CVPixelBuffer?
      CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_OneComponent8, nil, &pixelBuffer)
      guard let buffer = pixelBuffer else { return nil }

      // Scale the image down to 28x28 and render it into the buffer, using a
      // gray color space to match the buffer's single-channel pixel format.
      let context = CIContext()
      let scaled = ciImage.transformed(by: CGAffineTransform(
         scaleX: CGFloat(width) / ciImage.extent.width,
         y: CGFloat(height) / ciImage.extent.height
      ))
      context.render(
         scaled,
         to: buffer,
         bounds: CGRect(x: 0, y: 0, width: width, height: height),
         colorSpace: CGColorSpaceCreateDeviceGray()
      )
      return buffer
   }
}
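
// A minimal usage sketch (the view name and layout below are assumptions, not
// part of the original listing): a SwiftUI list that shows each bundled digit
// image next to the label ModelProvider predicts for it.
struct DigitListView: View {
   let provider = ModelProvider.shared

   var body: some View {
      List(provider.images, id: \.self) { name in
         HStack {
            Image(name)
               .resizable()
               .scaledToFit()
               .frame(width: 56, height: 56)
            Spacer()
            Text(provider.prediction(image: name))
               .font(.title)
         }
      }
   }
}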