If you change your code to
/// Extracts a thumbnail frame from the video at `videoURL`.
///
/// No time tolerances are set, so `AVAssetImageGenerator` is free to return a
/// frame *near* the requested offset rather than exactly at it — that
/// flexibility is what makes this version succeed where a strict request fails.
///
/// - Parameters:
///   - videoURL: String form of the video's URL.
///   - imgName: Name of a bundled fallback image returned when extraction fails.
///   - seconds: Offset into the video to sample. Defaults to 5, matching the
///     original hard-coded value.
/// - Returns: The extracted frame, the fallback image, or `nil` if the URL is
///   malformed and no fallback image exists in the bundle.
func videoSnapshot(videoURL: String, imgName: String, seconds: Double = 5) -> UIImage? {
    // URL(string:) returns nil for malformed strings; the original force-unwrap
    // crashed here. Use the existing fallback image instead.
    guard let url = URL(string: videoURL) else {
        return UIImage(named: imgName)
    }
    let asset = AVAsset(url: url)
    let generator = AVAssetImageGenerator(asset: asset)
    // Honor the track's rotation metadata so the thumbnail isn't sideways.
    generator.appliesPreferredTrackTransform = true
    let time = CMTime(seconds: seconds, preferredTimescale: 100)
    do {
        let cgImage = try generator.copyCGImage(at: time, actualTime: nil)
        return UIImage(cgImage: cgImage)
    } catch {
        // Frame extraction failed (e.g. unreadable asset); return the fallback.
        return UIImage(named: imgName)
    }
}
that's 5 seconds in, you can see the thumbnail that gets generated.
This works because AVAssetImageGenerator is, by default, flexible about the times at which it actually grabs frames.
/// Extracts a frame-accurate thumbnail from the video at `videoURL`.
///
/// Both `requestedTimeToleranceBefore` and `requestedTimeToleranceAfter` are
/// set to `.zero`, forcing the generator to decode the frame at exactly the
/// requested time instead of snapping to a nearby (cheaper) sync frame.
///
/// - Parameters:
///   - videoURL: String form of the video's URL.
///   - imgName: Name of a bundled fallback image returned when extraction fails.
///   - seconds: Exact offset into the video to sample. Defaults to 1, matching
///     the original hard-coded value.
/// - Returns: The extracted frame, the fallback image, or `nil` if the URL is
///   malformed and no fallback image exists in the bundle.
func videoSnapshot(videoURL: String, imgName: String, seconds: Double = 1) -> UIImage? {
    // URL(string:) returns nil for malformed strings; the original force-unwrap
    // crashed here. Use the existing fallback image instead.
    guard let url = URL(string: videoURL) else {
        return UIImage(named: imgName)
    }
    let asset = AVAsset(url: url)
    let generator = AVAssetImageGenerator(asset: asset)
    // Honor the track's rotation metadata so the thumbnail isn't sideways.
    generator.appliesPreferredTrackTransform = true
    // Zero tolerance in both directions => exact-time (frame-accurate) capture.
    generator.requestedTimeToleranceAfter = .zero
    generator.requestedTimeToleranceBefore = .zero
    let time = CMTime(seconds: seconds, preferredTimescale: 100)
    do {
        let cgImage = try generator.copyCGImage(at: time, actualTime: nil)
        return UIImage(cgImage: cgImage)
    } catch {
        // Frame extraction failed (e.g. unreadable asset); return the fallback.
        return UIImage(named: imgName)
    }
}
Setting
assetImgGenerate.requestedTimeToleranceAfter = .zero
assetImgGenerate.requestedTimeToleranceBefore = .zero
ensures that you get a frame-accurate thumbnail at exactly the time you requested.
For more info see
https://developer.apple.com/documentation/avfoundation/avassetimagegenerator/1390571-requestedtimetolerancebefore