CannyX

Project Report

Goal 1: Capture the camera feed and render the video stream to the screen in real time, with a button to choose the front or back camera

Use AVCaptureSession to access the camera:

// ViewController.swift needs AVFoundation (capture) and Vision (VNSequenceRequestHandler)
import AVFoundation
import UIKit
import Vision

let session = AVCaptureSession()
var previewLayer: AVCaptureVideoPreviewLayer!
var sequenceHandler = VNSequenceRequestHandler()

// Frames are delivered on this serial background queue
let dataOutputQueue = DispatchQueue(
  label: "video data queue",
  qos: .userInitiated,
  attributes: [],
  autoreleaseFrequency: .workItem)

Configure the capture session:

func configureCaptureSession() {
  // Define the capture device we want to use
  // (default position: .back)
  guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera,
                                             for: .video,
                                             position: .back) else {
    fatalError("No back video camera available")
  }

  // Connect the camera to the capture session input
  do {
    let cameraInput = try AVCaptureDeviceInput(device: camera)
    session.addInput(cameraInput)
  } catch {
    fatalError(error.localizedDescription)
  }

  // Create the video data output, delivering 32-bit BGRA frames on the background queue
  let videoOutput = AVCaptureVideoDataOutput()
  videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
  videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]

  // Add the video output to the capture session
  session.addOutput(videoOutput)

  // Fix the video connection to portrait orientation
  let videoConnection = videoOutput.connection(with: .video)
  videoConnection?.videoOrientation = .portrait

  // Configure the preview layer
  previewLayer = AVCaptureVideoPreviewLayer(session: session)
  previewLayer.videoGravity = .resizeAspectFill
  previewLayer.frame = view.bounds
  view.layer.insertSublayer(previewLayer, at: 0)
}
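
For completeness, a minimal sketch of starting the session, assuming configureCaptureSession() is called from the view controller's viewDidLoad (iOS also requires an NSCameraUsageDescription entry in Info.plist before camera access is granted):

override func viewDidLoad() {
  super.viewDidLoad()
  configureCaptureSession()

  // startRunning() blocks, so Apple recommends starting the session off the main thread
  DispatchQueue.global(qos: .userInitiated).async {
    self.session.startRunning()
  }
}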

Handle switching between the front and back cameras:

// Button action: toggle between the front and back cameras
@IBAction func cameraChangedButton(_ sender: Any) {
  // Stop the session while we reconfigure it
  session.stopRunning()

  // Remove the current camera input
  guard let currentCameraInput = session.inputs[0] as? AVCaptureDeviceInput else { return }
  session.removeInput(currentCameraInput)

  // Pick the opposite position: .back becomes .front and .front becomes .back
  let newPosition: AVCaptureDevice.Position =
    currentCameraInput.device.position == .back ? .front : .back
  guard let newCamera = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                for: .video,
                                                position: newPosition) else { return }

  // Add the new camera input
  do {
    let cameraInput = try AVCaptureDeviceInput(device: newCamera)
    session.addInput(cameraInput)
  } catch {
    fatalError(error.localizedDescription)
  }

  // Re-apply portrait orientation, and mirror the video for the front camera
  session.outputs[0].connection(with: .video)?.videoOrientation = .portrait
  session.outputs[0].connection(with: .video)?.isVideoMirrored = (newPosition == .front)

  session.startRunning()
}

Goal 2: Provide a button that enables edge detection

Import the OpenCV library with CocoaPods:

pod 'OpenCV'

Swift cannot call C++ libraries directly; the calls have to go through Objective-C, with a bridging header enabling the mixed-language build. In this project, edgeDetector is a C++ file that detects edges with OpenCV's Canny function; OpenCVWrapper (an Objective-C file) calls the class in edgeDetector; and the bridging set up in Project-Bridging-Header.h finally lets ViewController use the relevant Objective-C methods.
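
Once bridged, the Objective-C selector detectEdgeIn: is imported into Swift under the standard renaming rules, so the wrapper reads like a native Swift type. A sketch of the call site (frameImage stands in for the UIImage built from a camera frame; the real call appears later in the capture callback):

// -(UIImage *)detectEdgeIn:(UIImage *)image is imported into Swift as detectEdge(in:)
let processed = OpenCVWrapper().detectEdge(in: frameImage)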

Here OpenCVWrapper.m must be renamed OpenCVWrapper.mm so the file is compiled as Objective-C++, mixing Objective-C and C++:

@implementation OpenCVWrapper : NSObject
- (UIImage *) detectEdgeIn: (UIImage *) image {
    // Convert the UIImage to a cv::Mat
    cv::Mat opencvImage;
    UIImageToMat(image, opencvImage, true);

    // Convert the color space to the one the edge detector expects (RGB)
    cv::Mat convertedColorSpaceImage;
    cv::cvtColor(opencvImage, convertedColorSpaceImage, COLOR_RGBA2RGB);

    // Run edge detection
    edgeDetector Detector;
    cv::Mat imageWithEdgeDetected = Detector.detect_edges(convertedColorSpaceImage);

    // Convert the cv::Mat back to a UIImage and return it to the caller
    return MatToUIImage(imageWithEdgeDetected);
}
@end

edgeDetector computes the edge map: run Canny (50 and 120 are the low and high hysteresis thresholds), invert the result so edges are black on white, convert to BGRA, then make the white background transparent and recolor the edges green:

Mat edgeDetector::detect_edges(Mat image) {

    // Grayscale, then Canny edge detection
    Mat greyScaledImage;
    cvtColor(image, greyScaledImage, COLOR_RGB2GRAY);

    Mat edgedOnlyImage;
    Canny(greyScaledImage, edgedOnlyImage, 50, 120);

    // Invert the binary edge map: edges become black (0) on a white (255) background
    Mat newBImg(edgedOnlyImage.rows, edgedOnlyImage.cols, edgedOnlyImage.type());
    uchar* newBImgData = newBImg.data;
    uchar* binaryData = edgedOnlyImage.data;
    int step = edgedOnlyImage.step / sizeof(uchar);

    for (int i = 0; i < edgedOnlyImage.rows; i++)
        for (int j = 0; j < edgedOnlyImage.cols; j++)
            newBImgData[i * step + j] = 255 - binaryData[i * step + j];

    edgedOnlyImage = newBImg.clone();

    // Convert the inverted edge map to a 4-channel BGRA image
    cvtColor(edgedOnlyImage, edgedOnlyImage, COLOR_GRAY2BGRA);

    // Make the white background transparent and recolor the black edges green
    uchar r, g, b;
    for (int i = 0; i < edgedOnlyImage.rows; i++)
        for (int j = 0; j < edgedOnlyImage.cols; j++)
        {
            r = edgedOnlyImage.at<Vec4b>(i, j)[2];
            g = edgedOnlyImage.at<Vec4b>(i, j)[1];
            b = edgedOnlyImage.at<Vec4b>(i, j)[0];

            if (r > 220 && g > 220 && b > 220)
            {
                // Background pixel: set alpha to fully transparent
                edgedOnlyImage.at<Vec4b>(i, j)[3] = 0;
            }
            if (r < 20 && g < 20 && b < 20)
            {
                // Edge pixel: pure green (B = 0, G = 255, R = 0)
                edgedOnlyImage.at<Vec4b>(i, j)[0] = 0;
                edgedOnlyImage.at<Vec4b>(i, j)[1] = 255;
                edgedOnlyImage.at<Vec4b>(i, j)[2] = 0;
            }
        }
    return edgedOnlyImage;
}

With the bridging done in Project-Bridging-Header.h, the Objective-C methods can be used directly in ViewController. Each camera frame first has to be converted into a UIImage:

// Get images from the camera: the AVCaptureVideoDataOutputSampleBufferDelegate callback
func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
  guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

  // Render the BGRA pixel buffer into a CGImage
  CVPixelBufferLockBaseAddress(imageBuffer, .readOnly)
  let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
  let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
  let width = CVPixelBufferGetWidth(imageBuffer)
  let height = CVPixelBufferGetHeight(imageBuffer)
  let colorSpace = CGColorSpaceCreateDeviceRGB()
  var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Little.rawValue
  bitmapInfo |= CGImageAlphaInfo.premultipliedFirst.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
  let context = CGContext(data: baseAddress, width: width, height: height,
                          bitsPerComponent: 8, bytesPerRow: bytesPerRow,
                          space: colorSpace, bitmapInfo: bitmapInfo)
  let cgImage = context?.makeImage()
  CVPixelBufferUnlockBaseAddress(imageBuffer, .readOnly)

  guard let quartzImage = cgImage else { return }
  let image = UIImage(cgImage: quartzImage)

  // ... the edge-detection and pixel-mean code shown below runs here ...
}

UIKit cannot be used on this background queue; UI updates must happen on the main thread, so the image-view update is wrapped in DispatchQueue.main.async:

let imageWithEdgeOverlay = OpenCVWrapper().detectEdge(in: image)
DispatchQueue.main.async {
  self.CannyView.image = imageWithEdgeOverlay
}
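
The toggle button itself is not shown here; a minimal sketch, assuming a Boolean flag (isEdgeDetectionEnabled, a name introduced purely for illustration) that the capture callback checks before running the detector:

// Hypothetical toggle flag; the report only states that a button enables edge detection
var isEdgeDetectionEnabled = false

@IBAction func edgeDetectionButton(_ sender: Any) {
  isEdgeDetectionEnabled.toggle()

  // Clear the overlay on the main thread when edge detection is switched off
  if !isEdgeDetectionEnabled {
    DispatchQueue.main.async { self.CannyView.image = nil }
  }
}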

Goal 3: Implement an image-mean library in C++, import it into the project, and compute the image mean in real time

For the C++ image-mean computation I still relied on OpenCV's routines. Importing the result into the project requires mixed-language compilation anyway, and newer versions of Xcode no longer offer a template for pure C++ libraries and frameworks, so the library ended up implemented in Objective-C++.

Again, mixing in C++ means renaming the Objective-C .m file to .mm:

@implementation AveragePiexlValueOC : NSObject
- (double) PixelValueOC: (UIImage *) image {
    // Convert the UIImage to a cv::Mat and hand it to the C++ calculator
    cv::Mat opencvImage;
    UIImageToMat(image, opencvImage, true);
    pixelValue calculator;
    double result = calculator.averagePixelValue(opencvImage);
    return result;
}
@end

double pixelValue::averagePixelValue(Mat image)
{
    // meanStdDev computes the per-channel mean and standard deviation;
    // val[0] is the mean of the first channel
    Scalar mean;
    Scalar dev;
    meanStdDev(image, mean, dev);
    return mean.val[0];
}

Building this produces the static library libAveragePiexlValueOC.a. Drag it and AveragePiexlValueOC.h into the project, bridge them in Project-Bridging-Header.h, and the class can be used in ViewController.

As with CannyView, the UI update needs to go inside DispatchQueue.main.async:

let averagePixelValueData2 = AveragePiexlValueOC().pixelValueOC(image)
DispatchQueue.main.async {
  self.dataShow.text = "AVERAGE PIXEL: " + String(format: "%.2f", averagePixelValueData2)
  self.CannyView.image = imageWithEdgeOverlay
}

App UI

App icon

App launch screen

Main interface UI

View hierarchy diagram

Final results

Back camera

Front camera

Average pixel value

Edge detection
