我有一个与AVFoundation搭建的快速录像机.我试着手动记录放大手势和放大/缩小相机.当手势开始时,我对变焦级别的计算似乎是关闭的.放大/缩小并不流畅,而且很有问题.如果我不断地放大缩小而不松开,那么变焦就会顺利地添加到相机上(不是很平滑,但很有效).然而,当我开始做手势时,相机要么完全放大,要么缩小,这主要是我面临的问题.
import SwiftUI
import SwiftUI
import AVKit
import AVFoundation
struct HomeStory: View {
@StateObject var cameraModel = CameraViewModel()
@GestureState private var scale: CGFloat = 1.0
@State private var previousScale: CGFloat = 1.0
var body: some View {
ZStack(alignment: .bottom) {
CameraStoryView()
.environmentObject(cameraModel)
.clipShape(RoundedRectangle(cornerRadius: 30, style: .continuous))
.gesture(MagnificationGesture()
.updating($scale, body: { (value, state, _) in
state = value
})
.onChanged { value in
let delta = value / previousScale
cameraModel.zoom(delta)
previousScale = value
}
)
}
}
}
struct CameraStoryView: View {
@EnvironmentObject var cameraModel: CameraViewModel
var body: some View {
GeometryReader { proxy in
let size = proxy.size
CameraPreview(size: size)
.environmentObject(cameraModel)
}
}
}
struct CameraPreview: UIViewRepresentable {
@EnvironmentObject var cameraModel : CameraViewModel
var size: CGSize
func makeUIView(context: Context) -> UIView {
let view = UIView()
cameraModel.preview = AVCaptureVideoPreviewLayer(session: cameraModel.session)
cameraModel.preview.frame.size = size
cameraModel.preview.videoGravity = .resizeAspectFill
view.layer.addSublayer(cameraModel.preview)
DispatchQueue.global(qos: .userInitiated).async {
cameraModel.session.startRunning()
}
return view
}
func updateUIView(_ uiView: UIView, context: Context) { }
}
class CameraViewModel: NSObject, ObservableObject, AVCaptureFileOutputRecordingDelegate, AVCapturePhotoCaptureDelegate {
@Published var session = AVCaptureSession()
@Published var alert = false
@Published var output = AVCaptureMovieFileOutput()
@Published var preview: AVCaptureVideoPreviewLayer!
@Published var isRecording: Bool = false
@Published var recordedURLs: [URL] = []
@Published var previewURL: URL?
@Published var showPreview: Bool = false
@Published var recordedDuration: CGFloat = 0
@Published var maxDuration: CGFloat = 20
@Published var capturedImage: UIImage?
@Published var photoOutput = AVCapturePhotoOutput()
@Published var flashMode: AVCaptureDevice.FlashMode = .off
var currentCameraPosition: AVCaptureDevice.Position = .back
func zoom(_ delta: CGFloat) {
if currentCameraPosition == .back {
guard let device = AVCaptureDevice.default(for: .video) else { return }
do {
try device.lockForConfiguration()
let currentZoomFactor = device.videoZoomFactor
var newZoomFactor = currentZoomFactor * delta
newZoomFactor = max(1.0, min(newZoomFactor, 3.0))
device.videoZoomFactor = newZoomFactor
device.unlockForConfiguration()
} catch {
print("Error zooming camera: \(error.localizedDescription)")
}
} else {
guard let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front) else { return }
do {
try device.lockForConfiguration()
let currentZoomFactor = device.videoZoomFactor
var newZoomFactor = currentZoomFactor * delta
newZoomFactor = max(1.0, min(newZoomFactor, 3.0))
device.videoZoomFactor = newZoomFactor
device.unlockForConfiguration()
} catch {
print("Error zooming camera: \(error.localizedDescription)")
}
}
}
func flipCamera() {
// Create a discovery session to find all available video devices
let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
// Get all available video devices
let videoDevices = discoverySession.devices
// Check if there is more than one video device
guard videoDevices.count > 1 else {
return // If not, return early
}
// Get the current input
guard let currentVideoInput = session.inputs.first as? AVCaptureDeviceInput else {
return
}
// Get the new camera position
let newCameraPosition: AVCaptureDevice.Position = (currentCameraPosition == .back) ? .front : .back
// Find the new camera device
if let newCamera = videoDevices.first(where: { $0.position == newCameraPosition }) {
// Create a new video input
do {
let newVideoInput = try AVCaptureDeviceInput(device: newCamera)
// Remove the current input
session.removeInput(currentVideoInput)
// Add the new input
if session.canAddInput(newVideoInput) {
session.addInput(newVideoInput)
currentCameraPosition = newCameraPosition
} else {
// Handle the case where adding the new input fails
print("Failed to add new camera input")
}
} catch {
// Handle any errors that occur while creating the new input
print("Error creating new camera input: \(error.localizedDescription)")
}
}
}
func takePhoto() {
let photoSettings = AVCapturePhotoSettings()
self.photoOutput.capturePhoto(with: photoSettings, delegate: self)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let error = error {
print("Error capturing photo: \(error.localizedDescription)")
return
}
if let imageData = photo.fileDataRepresentation(), let capturedImage = UIImage(data: imageData) {
self.capturedImage = capturedImage
}
}
func checkPermission(){
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
setUp()
return
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { (status) in
if status{
self.setUp()
}
}
case .denied:
self.alert.toggle()
return
default:
return
}
}
func setUp(){
do{
self.session.beginConfiguration()
let cameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
let videoInput = try AVCaptureDeviceInput(device: cameraDevice!)
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioInput = try AVCaptureDeviceInput(device: audioDevice!)
// MARK: Audio Input
if self.session.canAddInput(videoInput) && self.session.canAddInput(audioInput){
self.session.addInput(videoInput)
self.session.addInput(audioInput)
}
if self.session.canAddOutput(self.output){
self.session.addOutput(self.output)
}
if self.session.canAddOutput(self.photoOutput) {
self.session.addOutput(self.photoOutput)
}
self.session.commitConfiguration()
}
catch{
print(error.localizedDescription)
}
}
func startRecording(){
// MARK: Temporary URL for recording Video
let tempURL = NSTemporaryDirectory() + "\(Date()).mov"
output.startRecording(to: URL(fileURLWithPath: tempURL), recordingDelegate: self)
isRecording = true
}
func stopRecording(){
output.stopRecording()
isRecording = false
}
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
if let error = error {
print(error.localizedDescription)
return
}
// CREATED SUCCESSFULLY
print(outputFileURL)
self.recordedURLs.append(outputFileURL)
if self.recordedURLs.count == 1{
self.previewURL = outputFileURL
return
}
// CONVERTING URLs TO ASSETS
let assets = recordedURLs.compactMap { url -> AVURLAsset in
return AVURLAsset(url: url)
}
self.previewURL = nil
// MERGING VIDEOS
Task {
await mergeVideos(assets: assets) { exporter in
exporter.exportAsynchronously {
if exporter.status == .failed{
// HANDLE ERROR
print(exporter.error!)
}
else{
if let finalURL = exporter.outputURL{
print(finalURL)
DispatchQueue.main.async {
self.previewURL = finalURL
}
}
}
}
}
}
}
func mergeVideos(assets: [AVURLAsset],completion: @escaping (_ exporter: AVAssetExportSession)->()) async {
let compostion = AVMutableComposition()
var lastTime: CMTime = .zero
guard let videoTrack = compostion.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
guard let audioTrack = compostion.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else{return}
for asset in assets {
// Linking Audio and Video
do {
try await videoTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.load(.duration)), of: asset.loadTracks(withMediaType: .video)[0], at: lastTime)
// Safe Check if Video has Audio
if try await !asset.loadTracks(withMediaType: .audio).isEmpty {
try await audioTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.load(.duration)), of: asset.loadTracks(withMediaType: .audio)[0], at: lastTime)
}
}
catch {
print(error.localizedDescription)
}
// Updating Last Time
do {
lastTime = try await CMTimeAdd(lastTime, asset.load(.duration))
} catch {
print(error.localizedDescription)
}
}
// MARK: Temp Output URL
let tempURL = URL(fileURLWithPath: NSTemporaryDirectory() + "Reel-\(Date()).mp4")
// VIDEO IS ROTATED
// BRINGING BACK TO ORIGNINAL TRANSFORM
let layerInstructions = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
// MARK: Transform
var transform = CGAffineTransform.identity
transform = transform.rotated(by: 90 * (.pi / 180))
transform = transform.translatedBy(x: 0, y: -videoTrack.naturalSize.height)
layerInstructions.setTransform(transform, at: .zero)
let instructions = AVMutableVideoCompositionInstruction()
instructions.timeRange = CMTimeRange(start: .zero, duration: lastTime)
instructions.layerInstructions = [layerInstructions]
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
videoComposition.instructions = [instructions]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
guard let exporter = AVAssetExportSession(asset: compostion, presetName: AVAssetExportPresetHighestQuality) else{return}
exporter.outputFileType = .mp4
exporter.outputURL = tempURL
exporter.videoComposition = videoComposition
completion(exporter)
}
}