Swift - Implementing face detection and face pixelation (with sample code)
1. Implementing face detection
Source code download: hangge_907.zip
(1) Face detection means locating regions of an image that match facial features; for each region found, the detector returns information about those features (such as the face's bounds and the positions of the eyes and mouth). It is not face recognition, i.e. it does not identify whose face it is.
(2) The CIDetector class in the Core Image framework provides image detection. Create the detector with the CIDetectorTypeFace type to detect faces (a short sketch follows this list).
(3) The sample below demonstrates face detection and, once detection finishes, outlines each detected face with a rectangle.
(Note: the rectangles are UIViews added to the imageView, while the detected face positions are relative to the original image. The rectangle positions therefore have to account for the image's scale factor inside the imageView and the x/y offsets.)
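As a quick, self-contained illustration of points (1) to (3), here is a minimal sketch (in the same Swift 2-era syntax as the full sample in section 4) that creates a CIDetector and prints the basic information of each detected face. The function name and image parameter are placeholders, not part of the original sample. Note that the returned bounds use Core Image's coordinate system, whose origin is at the bottom-left of the image, which is why the full sample flips, scales, and offsets the rectangles before adding them to the imageView.

import UIKit
import CoreImage

// Minimal sketch: detect faces in a UIImage and print their basic feature info.
func printFaceInfo(image: UIImage) {
    let ciImage = CIImage(image: image)!
    // CIDetectorAccuracyHigh trades speed for accuracy, just like in the full sample
    let detector = CIDetector(ofType: CIDetectorTypeFace, context: nil,
                              options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
    for face in detector.featuresInImage(ciImage) as! [CIFaceFeature] {
        // bounds is in Core Image coordinates (origin at the bottom-left of the image)
        print("face bounds: \(face.bounds)")
        if face.hasLeftEyePosition  { print("left eye:  \(face.leftEyePosition)") }
        if face.hasRightEyePosition { print("right eye: \(face.rightEyePosition)") }
        if face.hasMouthPosition    { print("mouth:     \(face.mouthPosition)") }
    }
}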
2. Adding a mosaic (pixelation) to detected faces
(1) First use the CIPixellate filter to produce a fully pixelated copy of the original image.
(2) Detect the faces; for each face, take the face's center as the circle's center and a radius derived from the face's width or height, and build a mask containing one circular region per face.
(3) Use the CIBlendWithMask filter to blend the pixelated image, the original image, and the mask, and output the result (a condensed sketch follows this list).
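The three steps condense to the sketch below, written in the same Swift 2-era syntax as the full sample in section 4 and applied to a single, hypothetical face rectangle (the function name and parameters are illustrative only). The full sample builds the same chain for every detected face and merges the per-face masks with a CISourceOverCompositing filter.

import UIKit
import CoreImage

// Sketch: pixelate one circular region of an image.
// faceRect is assumed to be a face rectangle in Core Image coordinates.
func pixellateFace(image: UIImage, faceRect: CGRect) -> CIImage? {
    let input = CIImage(image: image)!

    // (1) a fully pixelated copy of the original
    let pixellate = CIFilter(name: "CIPixellate")!
    pixellate.setValue(input, forKey: kCIInputImageKey)
    pixellate.setValue(max(input.extent.width, input.extent.height) / 80, forKey: kCIInputScaleKey)

    // (2) a circular mask: opaque inside the face circle, transparent outside;
    //     the gradient has infinite extent, so crop it to the input image
    let radius = min(faceRect.width, faceRect.height)
    let mask = CIFilter(name: "CIRadialGradient", withInputParameters: [
        kCIInputCenterKey : CIVector(x: faceRect.midX, y: faceRect.midY),
        "inputRadius0"    : radius,
        "inputRadius1"    : radius + 1,
        "inputColor0"     : CIColor(red: 0, green: 1, blue: 0, alpha: 1),
        "inputColor1"     : CIColor(red: 0, green: 0, blue: 0, alpha: 0)
    ])!.outputImage!.imageByCroppingToRect(input.extent)

    // (3) blend: pixelated pixels where the mask is opaque, original pixels elsewhere
    let blend = CIFilter(name: "CIBlendWithMask")!
    blend.setValue(pixellate.outputImage, forKey: kCIInputImageKey)
    blend.setValue(input, forKey: kCIInputBackgroundImageKey)
    blend.setValue(mask, forKey: kCIInputMaskImageKey)
    return blend.outputImage
}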
3. Screenshots of the result
4. The code
import UIKit
import ImageIO

class ViewController: UIViewController {

    @IBOutlet weak var imageView: UIImageView!

    // The original image
    lazy var originalImage: UIImage = {
        return UIImage(named: "d1.jpg")
    }()!

    lazy var context: CIContext = {
        return CIContext(options: nil)
    }()

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    // Restore the original image
    @IBAction func resetImg(sender: AnyObject) {
        imageView.image = originalImage
    }

    // Detect faces and outline them with rectangles
    @IBAction func detectFace(sender: AnyObject) {
        imageView.image = originalImage
        let inputImage = CIImage(image: originalImage)!

        // The face detector
        // CIDetectorAccuracyHigh: higher accuracy, but slower
        let detector = CIDetector(ofType: CIDetectorTypeFace, context: context,
                                  options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])

        var faceFeatures: [CIFaceFeature]!
        // Face detection needs the image orientation (use the metadata if present,
        // otherwise call featuresInImage without it)
        if let orientation: AnyObject = inputImage.properties[kCGImagePropertyOrientation as String] {
            faceFeatures = detector.featuresInImage(inputImage,
                options: [CIDetectorImageOrientation: orientation]) as! [CIFaceFeature]
        } else {
            faceFeatures = detector.featuresInImage(inputImage) as! [CIFaceFeature]
        }

        // Print all detected face features
        print(faceFeatures)

        let inputImageSize = inputImage.extent.size
        var transform = CGAffineTransformIdentity
        transform = CGAffineTransformScale(transform, 1, -1)
        transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height)

        // Loop over all the faces and outline each one
        for faceFeature in faceFeatures {
            var faceViewBounds = CGRectApplyAffineTransform(faceFeature.bounds, transform)

            // Because the original image is scaled inside the imageView,
            // we also need to account for the scale factor and the x/y offsets
            let scale = min(imageView.bounds.size.width / inputImageSize.width,
                            imageView.bounds.size.height / inputImageSize.height)
            let offsetX = (imageView.bounds.size.width - inputImageSize.width * scale) / 2
            let offsetY = (imageView.bounds.size.height - inputImageSize.height * scale) / 2
            faceViewBounds = CGRectApplyAffineTransform(faceViewBounds, CGAffineTransformMakeScale(scale, scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            // One UIView rectangle per face
            let faceView = UIView(frame: faceViewBounds)
            faceView.layer.borderColor = UIColor.orangeColor().CGColor
            faceView.layer.borderWidth = 2
            imageView.addSubview(faceView)
        }
    }

    // Detect faces and pixelate them
    @IBAction func detectAndPixFace(sender: AnyObject) {
        // Use the CIPixellate filter to fully pixelate the original image first
        let filter = CIFilter(name: "CIPixellate")!
        print(filter.attributes)
        let inputImage = CIImage(image: originalImage)!
        filter.setValue(inputImage, forKey: kCIInputImageKey)
        let inputScale = max(inputImage.extent.size.width, inputImage.extent.size.height) / 80
        filter.setValue(inputScale, forKey: kCIInputScaleKey)
        let fullPixellatedImage = filter.outputImage

        // Detect the faces and store them in faceFeatures
        let detector = CIDetector(ofType: CIDetectorTypeFace, context: context, options: nil)
        let faceFeatures = detector.featuresInImage(inputImage)

        // Initialize the mask image, then loop over all detected faces
        var maskImage: CIImage!
        for faceFeature in faceFeatures {
            print(faceFeature.bounds)

            // Create a separate mask for each face based on its position: first compute the
            // face's center (x and y coordinates), then derive a radius from the face's width
            // or height, and use these values to configure a CIRadialGradient filter
            let centerX = faceFeature.bounds.origin.x + faceFeature.bounds.size.width / 2
            let centerY = faceFeature.bounds.origin.y + faceFeature.bounds.size.height / 2
            let radius = min(faceFeature.bounds.size.width, faceFeature.bounds.size.height)
            let radialGradient = CIFilter(name: "CIRadialGradient",
                withInputParameters: [
                    "inputRadius0" : radius,
                    "inputRadius1" : radius + 1,
                    "inputColor0" : CIColor(red: 0, green: 1, blue: 0, alpha: 1),
                    "inputColor1" : CIColor(red: 0, green: 0, blue: 0, alpha: 0),
                    kCIInputCenterKey : CIVector(x: centerX, y: centerY)
                ])!
            print(radialGradient.attributes)

            // CIRadialGradient produces an image of infinite extent, so crop it before use
            let radialGradientOutputImage = radialGradient.outputImage!
                .imageByCroppingToRect(inputImage.extent)
            if maskImage == nil {
                maskImage = radialGradientOutputImage
            } else {
                print(radialGradientOutputImage)
                maskImage = CIFilter(name: "CISourceOverCompositing",
                    withInputParameters: [
                        kCIInputImageKey : radialGradientOutputImage,
                        kCIInputBackgroundImageKey : maskImage
                    ])!.outputImage
            }
        }

        // Use the CIBlendWithMask filter to blend the pixelated image, the original image, and the mask
        let blendFilter = CIFilter(name: "CIBlendWithMask")!
        blendFilter.setValue(fullPixellatedImage, forKey: kCIInputImageKey)
        blendFilter.setValue(inputImage, forKey: kCIInputBackgroundImageKey)
        blendFilter.setValue(maskImage, forKey: kCIInputMaskImageKey)

        // Render the output and show it on screen
        let blendOutputImage = blendFilter.outputImage
        let blendCGImage = context.createCGImage(blendOutputImage!, fromRect: blendOutputImage!.extent)
        imageView.image = UIImage(CGImage: blendCGImage)
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }
}
func accountDeleteAddress(modelID: Int) -> NSDictionary {
    var message: String!
    var status: String = "error"
    let id = addressList[modelID].id
    let actionArray: NSMutableDictionary = ["id": id, "customer_id": USERID]
    var result: NSDictionary!
    AlamofireSwiftyJSON(url: "Account/InformationDelete", para: actionArray as NSMutableDictionary, successed: {
        (response) in
        let resultJson = JSON(response as Any)
        if resultJson["status"] == "success" {
            addressList.remove(at: modelID)
            message = "\(jsonSuccessMessage)"
            status = "success"
        } else {
            message = "\(resultJson["message"].stringValue)"
        }
    }, failed: {
        _ in
        message = "\(jsonErrorMessage)"
    })
    result = ["status": status, "message": message]
    return result
}
Something like the code above. How can I turn it into a synchronous call?
hangge, I've run into a problem in Swift: when I write a method that both returns a value and does a network request, it always returns before the data has arrived. How do I solve this?
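Here is a minimal sketch of one common fix: keep the call asynchronous and hand the result back through a completion closure instead of a return value. It reuses the AlamofireSwiftyJSON helper, addressList, USERID, jsonSuccessMessage, and jsonErrorMessage from the snippet above, which are assumed to exist and are not part of the article. (Blocking the calling thread, for example with a DispatchSemaphore, would make the call literally synchronous, but a completion handler is usually the better choice, especially on the main thread.)

import Foundation
import SwiftyJSON

// Sketch only: deliver the result when the request finishes instead of returning early.
func accountDeleteAddress(modelID: Int, completion: @escaping (NSDictionary) -> Void) {
    let id = addressList[modelID].id
    let actionArray: NSMutableDictionary = ["id": id, "customer_id": USERID]
    AlamofireSwiftyJSON(url: "Account/InformationDelete", para: actionArray, successed: { response in
        let resultJson = JSON(response as Any)
        if resultJson["status"] == "success" {
            addressList.remove(at: modelID)
            completion(["status": "success", "message": "\(jsonSuccessMessage)"])
        } else {
            completion(["status": "error", "message": resultJson["message"].stringValue])
        }
    }, failed: { _ in
        completion(["status": "error", "message": "\(jsonErrorMessage)"])
    })
}

// Call site: read the result inside the closure instead of from a return value, e.g.
// accountDeleteAddress(modelID: 0) { result in print(result) }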
Hangge, could you please write a post on face recognition? Pretty please!
Many thanks to the author! Learned a lot. I've been researching exactly this recently, much appreciated!
Hi, could you provide the source code for download? (*^__^*)