我有一个与AVFoundation搭建的快速录像机.我试着手动记录放大手势和放大/缩小相机.当手势开始时,我对变焦级别的计算似乎是关闭的.放大/缩小并不流畅,而且很有问题.如果我不断地放大缩小而不松开,那么变焦就会顺利地添加到相机上(不是很平滑,但很有效).然而,当我开始做手势时,相机要么完全放大,要么缩小,这主要是我面临的问题.

import SwiftUI
import SwiftUI
import AVKit
import AVFoundation

struct HomeStory: View {
   @StateObject var cameraModel = CameraViewModel()
   @GestureState private var scale: CGFloat = 1.0
   @State private var previousScale: CGFloat = 1.0
   
   var body: some View {
       ZStack(alignment: .bottom) {
           CameraStoryView()
               .environmentObject(cameraModel)
               .clipShape(RoundedRectangle(cornerRadius: 30, style: .continuous))
               .gesture(MagnificationGesture()
                   .updating($scale, body: { (value, state, _) in
                       state = value
                   })
                   .onChanged { value in
                       let delta = value / previousScale
                       cameraModel.zoom(delta)
                       previousScale = value
                   }
               )
       }
   }
}

struct CameraStoryView: View {
   @EnvironmentObject var cameraModel: CameraViewModel
   var body: some View {
       
       GeometryReader { proxy in
           let size = proxy.size
           
           CameraPreview(size: size)
               .environmentObject(cameraModel)
       }
   }
}

struct CameraPreview: UIViewRepresentable {
   @EnvironmentObject var cameraModel : CameraViewModel
   var size: CGSize
   
   func makeUIView(context: Context) ->  UIView {
       let view = UIView()
       
       cameraModel.preview = AVCaptureVideoPreviewLayer(session: cameraModel.session)
       cameraModel.preview.frame.size = size
       
       cameraModel.preview.videoGravity = .resizeAspectFill
       view.layer.addSublayer(cameraModel.preview)
       
       DispatchQueue.global(qos: .userInitiated).async {
           cameraModel.session.startRunning()
       }
       
       return view
   }
   
   func updateUIView(_ uiView: UIView, context: Context) { }
}


class CameraViewModel: NSObject, ObservableObject, AVCaptureFileOutputRecordingDelegate, AVCapturePhotoCaptureDelegate {
   @Published var session = AVCaptureSession()
   @Published var alert = false
   @Published var output = AVCaptureMovieFileOutput()
   @Published var preview: AVCaptureVideoPreviewLayer!
   @Published var isRecording: Bool = false
   @Published var recordedURLs: [URL] = []
   @Published var previewURL: URL?
   @Published var showPreview: Bool = false
   @Published var recordedDuration: CGFloat = 0
   @Published var maxDuration: CGFloat = 20
   
   @Published var capturedImage: UIImage?
   @Published var photoOutput = AVCapturePhotoOutput()
   @Published var flashMode: AVCaptureDevice.FlashMode = .off
   var currentCameraPosition: AVCaptureDevice.Position = .back
   
   func zoom(_ delta: CGFloat) {
       if currentCameraPosition == .back {
           guard let device = AVCaptureDevice.default(for: .video) else { return }
           do {
               try device.lockForConfiguration()
               
               let currentZoomFactor = device.videoZoomFactor
               var newZoomFactor = currentZoomFactor * delta
               
               newZoomFactor = max(1.0, min(newZoomFactor, 3.0))
               
               device.videoZoomFactor = newZoomFactor
               device.unlockForConfiguration()
           } catch {
               print("Error zooming camera: \(error.localizedDescription)")
           }
       } else {
           guard let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front) else { return }
           do {
               try device.lockForConfiguration()
               
               let currentZoomFactor = device.videoZoomFactor
               var newZoomFactor = currentZoomFactor * delta
               
               newZoomFactor = max(1.0, min(newZoomFactor, 3.0))
               
               device.videoZoomFactor = newZoomFactor
               device.unlockForConfiguration()
           } catch {
               print("Error zooming camera: \(error.localizedDescription)")
           }
       }
   }
   
   func flipCamera() {
       // Create a discovery session to find all available video devices
       let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .unspecified)

       // Get all available video devices
       let videoDevices = discoverySession.devices

       // Check if there is more than one video device
       guard videoDevices.count > 1 else {
           return // If not, return early
       }

       // Get the current input
       guard let currentVideoInput = session.inputs.first as? AVCaptureDeviceInput else {
           return
       }

       // Get the new camera position
       let newCameraPosition: AVCaptureDevice.Position = (currentCameraPosition == .back) ? .front : .back

       // Find the new camera device
       if let newCamera = videoDevices.first(where: { $0.position == newCameraPosition }) {
           // Create a new video input
           do {
               let newVideoInput = try AVCaptureDeviceInput(device: newCamera)

               // Remove the current input
               session.removeInput(currentVideoInput)

               // Add the new input
               if session.canAddInput(newVideoInput) {
                   session.addInput(newVideoInput)
                   currentCameraPosition = newCameraPosition
               } else {
                   // Handle the case where adding the new input fails
                   print("Failed to add new camera input")
               }
           } catch {
               // Handle any errors that occur while creating the new input
               print("Error creating new camera input: \(error.localizedDescription)")
           }
       }
   }
   
   func takePhoto() {
       let photoSettings = AVCapturePhotoSettings()
       self.photoOutput.capturePhoto(with: photoSettings, delegate: self)
   }
   
   func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
       if let error = error {
           print("Error capturing photo: \(error.localizedDescription)")
           return
       }

       if let imageData = photo.fileDataRepresentation(), let capturedImage = UIImage(data: imageData) {
           self.capturedImage = capturedImage
       }
   }
   
   func checkPermission(){
       
       switch AVCaptureDevice.authorizationStatus(for: .video) {
       case .authorized:
           setUp()
           return
       case .notDetermined:
           AVCaptureDevice.requestAccess(for: .video) { (status) in
               
               if status{
                   self.setUp()
               }
           }
       case .denied:
           self.alert.toggle()
           return
       default:
           return
       }
   }
   
   func setUp(){
       
       do{
           self.session.beginConfiguration()
           let cameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
           let videoInput = try AVCaptureDeviceInput(device: cameraDevice!)
           let audioDevice = AVCaptureDevice.default(for: .audio)
           let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
           
           // MARK: Audio Input
           
           if self.session.canAddInput(videoInput) && self.session.canAddInput(audioInput){
               self.session.addInput(videoInput)
               self.session.addInput(audioInput)
           }

           if self.session.canAddOutput(self.output){
               self.session.addOutput(self.output)
           }
           
           if self.session.canAddOutput(self.photoOutput) {
               self.session.addOutput(self.photoOutput)
           }
           
           self.session.commitConfiguration()
       }
       catch{
           print(error.localizedDescription)
       }
   }
   
   func startRecording(){
       // MARK: Temporary URL for recording Video
       let tempURL = NSTemporaryDirectory() + "\(Date()).mov"
       output.startRecording(to: URL(fileURLWithPath: tempURL), recordingDelegate: self)
       isRecording = true
   }
   
   func stopRecording(){
       output.stopRecording()
       isRecording = false
   }
   
   func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
       if let error = error {
           print(error.localizedDescription)
           return
       }
       
       // CREATED SUCCESSFULLY
       print(outputFileURL)
       self.recordedURLs.append(outputFileURL)
       if self.recordedURLs.count == 1{
           self.previewURL = outputFileURL
           return
       }
       
       // CONVERTING URLs TO ASSETS
       let assets = recordedURLs.compactMap { url -> AVURLAsset in
           return AVURLAsset(url: url)
       }
       
       self.previewURL = nil
       // MERGING VIDEOS
       Task {
           await mergeVideos(assets: assets) { exporter in
               exporter.exportAsynchronously {
                   if exporter.status == .failed{
                       // HANDLE ERROR
                       print(exporter.error!)
                   }
                   else{
                       if let finalURL = exporter.outputURL{
                           print(finalURL)
                           DispatchQueue.main.async {
                               self.previewURL = finalURL
                           }
                       }
                   }
               }
           }
       }
   }
   
   func mergeVideos(assets: [AVURLAsset],completion: @escaping (_ exporter: AVAssetExportSession)->()) async {
       
       let compostion = AVMutableComposition()
       var lastTime: CMTime = .zero
       
       guard let videoTrack = compostion.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
       guard let audioTrack = compostion.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
       
       for asset in assets {
           // Linking Audio and Video
           do {
               try await videoTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.load(.duration)), of: asset.loadTracks(withMediaType: .video)[0], at: lastTime)
               // Safe Check if Video has Audio
               if try await !asset.loadTracks(withMediaType: .audio).isEmpty {
                   try await audioTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.load(.duration)), of: asset.loadTracks(withMediaType: .audio)[0], at: lastTime)
               }
           }
           catch {
               print(error.localizedDescription)
           }
           
           // Updating Last Time
           do {
               lastTime = try await CMTimeAdd(lastTime, asset.load(.duration))
           } catch {
               print(error.localizedDescription)
           }
       }
       
       // MARK: Temp Output URL
       let tempURL = URL(fileURLWithPath: NSTemporaryDirectory() + "Reel-\(Date()).mp4")
       
       // VIDEO IS ROTATED
       // BRINGING BACK TO ORIGNINAL TRANSFORM
       
       let layerInstructions = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
       
       // MARK: Transform
       var transform = CGAffineTransform.identity
       transform = transform.rotated(by: 90 * (.pi / 180))
       transform = transform.translatedBy(x: 0, y: -videoTrack.naturalSize.height)
       layerInstructions.setTransform(transform, at: .zero)
       
       let instructions = AVMutableVideoCompositionInstruction()
       instructions.timeRange = CMTimeRange(start: .zero, duration: lastTime)
       instructions.layerInstructions = [layerInstructions]
       
       let videoComposition = AVMutableVideoComposition()
       videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
       videoComposition.instructions = [instructions]
       videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
       
       guard let exporter = AVAssetExportSession(asset: compostion, presetName: AVAssetExportPresetHighestQuality) else{return}
       exporter.outputFileType = .mp4
       exporter.outputURL = tempURL
       exporter.videoComposition = videoComposition
       completion(exporter)
   }
}

推荐答案

try 并调整您的zoom(_ delta: CGFloat)方法,以确保zoom 级别变化平稳.

func zoom(_ delta: CGFloat) {
    guard let device = (currentCameraPosition == .back ? AVCaptureDevice.default(for: .video) : AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)) else { return }
    do {
        try device.lockForConfiguration()
        
        var newZoomFactor = device.videoZoomFactor * delta
        newZoomFactor = max(1.0, min(newZoomFactor, device.activeFormat.videoMaxZoomFactor))
        
        device.videoZoomFactor = newZoomFactor
        device.unlockForConfiguration()
    } catch {
        print("Error zooming camera: \(error.localizedDescription)")
    }
}

这将巩固前置和后置摄像头的设备 Select .您可以使用device.activeFormat.videoMaxZoomFactor来确定最大zoom 级别,该级别提供特定于设备的zoom 限制.

HomeStory视图中,您需要更新手势处理以在手势结束时重置previousScale:

.gesture(MagnificationGesture()
    .updating($scale, body: { (value, state, _) in
        state = value
    })
    .onChanged { value in
        let delta = value / previousScale
        cameraModel.zoom(delta)
        previousScale = value
    }
    .onEnded { _ in
        previousScale = 1.0
    }
)

这应该会确保每一个新的姿态都有一个新的标尺.

+-------------------------+
| HomeStory               |
| +---------------------+ |
| | MagnificationGesture| |
| |   onChanged         |<------ Update Scale ---+
| |   onEnded           | |                      |
| +---------------------+ |                      |
| +---------------------+ |                      |
| | CameraViewModel     | |                      |
| |   zoom(_ delta:)    | +---- Apply Zoom Delta |
| +---------------------+ |                      |
+-------------------------+                      |
                                                 |
(Camera)                                         |
+-------------------------+                      |
| AVCaptureDevice         |                      |
|   zoomFactor            |<---------------------+
+-------------------------+

有关另一种方法,请参阅"Pinch to zoom camera"(2022年的答案,更适合UIKit,而不是SwiftUI,但有一些 idea ).

Ios相关问答推荐

IFrame内容拒绝仅在iOS浏览器上加载-即使发布了内容-安全-策略(CSP)框架-祖先指令

SWIFT用户界面检测手势的开始和结束

将数据传回VC时委托为空

如何在Android或iPhone上的原生react 中处理其他应用程序通知?

磁盘上的Objective-C ISA指针与实例化对象时的比较

Xcode 15模拟器用x86_64编译

拖动手势导致 sim 崩溃 Swift UI

如何删除点击时按钮的不透明动画?

在 Swift 项目中使用情节提要加载 obj-c 文件

使用异步重载实现Swift协议一致性

SwiftUI:同时拖动父视图和子视图,但手指分开

使用 SceneKit 从 CapturedRoom.walls 重新创建 RoomPlan

使用 Foundation 解压缩文件

在 iPhone 中将 UIViewController 显示为弹出窗口

如何覆盖@synthesized getter?

SwiftUI NavigationView navigationBarTitle LayoutConstraints 问题

文件是为存档而构建的,它不是被链接的体系 struct (i386)

如何在 iphone 屏幕中间显示活动指示器?

从 Swift 转换为 Objective-c 的工具

请在您的 Podfile 中为此目标指定一个平台?