Merge branch 'charlesmchen/imageEditorTranslation'

pull/2/head
Matthew Chen 6 years ago
commit a2d48aa020

@ -173,6 +173,7 @@ public class ImageEditorCanvasView: UIView {
contentLayerMap.removeAll()
let viewSize = clipView.bounds.size
let transform = model.currentTransform()
if viewSize.width > 0,
viewSize.height > 0 {
@ -183,6 +184,7 @@ public class ImageEditorCanvasView: UIView {
for item in model.items() {
guard let layer = ImageEditorCanvasView.layerForItem(item: item,
model: model,
transform: transform,
viewSize: viewSize) else {
continue
}
@ -217,6 +219,7 @@ public class ImageEditorCanvasView: UIView {
}
let viewSize = clipView.bounds.size
let transform = model.currentTransform()
if viewSize.width > 0,
viewSize.height > 0 {
@ -234,6 +237,7 @@ public class ImageEditorCanvasView: UIView {
// Item was inserted or updated.
guard let layer = ImageEditorCanvasView.layerForItem(item: item,
model: model,
transform: transform,
viewSize: viewSize) else {
continue
}
@ -320,6 +324,7 @@ public class ImageEditorCanvasView: UIView {
private class func layerForItem(item: ImageEditorItem,
model: ImageEditorModel,
transform: ImageEditorTransform,
viewSize: CGSize) -> CALayer? {
AssertIsOnMainThread()
@ -332,7 +337,10 @@ public class ImageEditorCanvasView: UIView {
owsFailDebug("Item has unexpected type: \(type(of: item)).")
return nil
}
return strokeLayerForItem(item: strokeItem, viewSize: viewSize)
return strokeLayerForItem(item: strokeItem,
model: model,
transform: transform,
viewSize: viewSize)
case .text:
guard let textItem = item as? ImageEditorTextItem else {
owsFailDebug("Item has unexpected type: \(type(of: item)).")
@ -340,11 +348,14 @@ public class ImageEditorCanvasView: UIView {
}
return textLayerForItem(item: textItem,
model: model,
transform: transform,
viewSize: viewSize)
}
}
private class func strokeLayerForItem(item: ImageEditorStrokeItem,
model: ImageEditorModel,
transform: ImageEditorTransform,
viewSize: CGSize) -> CALayer? {
AssertIsOnMainThread()
@ -361,9 +372,13 @@ public class ImageEditorCanvasView: UIView {
shapeLayer.strokeColor = item.color.cgColor
shapeLayer.frame = CGRect(origin: .zero, size: viewSize)
// Stroke samples are specified in "image unit" coordinates, but
// need to be rendered in "canvas" coordinates. The imageFrame
// is the bounds of the image specified in "canvas" coordinates,
// so to transform we can simply convert from image frame units.
let imageFrame = ImageEditorCanvasView.imageFrame(forViewSize: viewSize, imageSize: model.srcImageSizePixels, transform: transform)
let transformSampleToPoint = { (unitSample: CGPoint) -> CGPoint in
return CGPoint(x: viewSize.width * unitSample.x,
y: viewSize.height * unitSample.y)
return unitSample.fromUnitCoordinates(viewBounds: imageFrame)
}
// Use bezier curves to smooth stroke.
@ -436,11 +451,11 @@ public class ImageEditorCanvasView: UIView {
private class func textLayerForItem(item: ImageEditorTextItem,
model: ImageEditorModel,
transform: ImageEditorTransform,
viewSize: CGSize) -> CALayer? {
AssertIsOnMainThread()
let imageFrame = self.imageFrame(forViewSize: viewSize, imageSize: model.srcImageSizePixels,
transform: model.currentTransform())
let imageFrame = ImageEditorCanvasView.imageFrame(forViewSize: viewSize, imageSize: model.srcImageSizePixels, transform: transform)
// We need to adjust the font size to reflect the current output scale,
// using the image width as reference.
@ -476,11 +491,14 @@ public class ImageEditorCanvasView: UIView {
.font: item.font.withSize(fontSize)
],
context: nil)
let center = CGPoint(x: viewSize.width * item.unitCenter.x,
y: viewSize.height * item.unitCenter.y)
// The text item's center is specified in "image unit" coordinates, but
// needs to be rendered in "canvas" coordinates. The imageFrame
// is the bounds of the image specified in "canvas" coordinates,
// so to transform we can simply convert from image frame units.
let centerInCanvas = item.unitCenter.fromUnitCoordinates(viewBounds: imageFrame)
let layerSize = CGSizeCeil(textBounds.size)
layer.frame = CGRect(origin: CGPoint(x: center.x - layerSize.width * 0.5,
y: center.y - layerSize.height * 0.5),
layer.frame = CGRect(origin: CGPoint(x: centerInCanvas.x - layerSize.width * 0.5,
y: centerInCanvas.y - layerSize.height * 0.5),
size: layerSize)
let transform = CGAffineTransform.identity.scaledBy(x: item.scaling, y: item.scaling).rotated(by: item.rotationRadians)
@ -521,41 +539,6 @@ public class ImageEditorCanvasView: UIView {
return result
}
// MARK: - Coordinates
/// Converts a gesture recognizer's touch location (measured in the canvas
/// clip view) into unit view coordinates, undoing the given transform.
public func locationUnit(forGestureRecognizer gestureRecognizer: UIGestureRecognizer,
                         transform: ImageEditorTransform) -> CGPoint {
    // Delegate to the class-level helper, using the clip view as the
    // gesture's reference view.
    let referenceView = self.clipView
    return ImageEditorCanvasView.locationUnit(forGestureRecognizer: gestureRecognizer,
                                              view: referenceView,
                                              transform: transform)
}
/// Converts a gesture recognizer's touch location in `view` into unit view
/// coordinates, undoing the given transform.
public class func locationUnit(forGestureRecognizer gestureRecognizer: UIGestureRecognizer,
                               view: UIView,
                               transform: ImageEditorTransform) -> CGPoint {
    // Resolve the gesture to a concrete point first, then reuse the
    // point-based conversion.
    let location = gestureRecognizer.location(in: view)
    return locationUnit(forLocationInView: location,
                        viewSize: view.bounds.size,
                        transform: transform)
}
/// Converts a point in the clip view's coordinates into unit view
/// coordinates, undoing the given transform.
public func locationUnit(forLocationInView locationInView: CGPoint,
                         transform: ImageEditorTransform) -> CGPoint {
    // Use the clip view's current size as the reference viewport.
    return ImageEditorCanvasView.locationUnit(forLocationInView: locationInView,
                                              viewSize: self.clipView.bounds.size,
                                              transform: transform)
}
/// Converts a view-coordinate point into unit view coordinates by inverting
/// the transform and normalizing by the view size (unclamped, so results may
/// fall outside 0-1).
public class func locationUnit(forLocationInView locationInView: CGPoint,
                               viewSize: CGSize,
                               transform: ImageEditorTransform) -> CGPoint {
    // Undo the current view transform to get back to content coordinates.
    let affineTransform = transform.affineTransform(viewSize: viewSize)
    let contentLocation = locationInView.applyingInverse(affineTransform)
    // Normalize against the viewport; callers may clamp if they need 0-1.
    return contentLocation.toUnitCoordinates(viewSize: viewSize, shouldClamp: false)
}
// MARK: - Actions
// Returns nil on error.
@ -608,6 +591,7 @@ public class ImageEditorCanvasView: UIView {
for item in model.items() {
guard let layer = layerForItem(item: item,
model: model,
transform: transform,
viewSize: viewSize) else {
owsFailDebug("Couldn't create layer for item.")
continue

@ -296,6 +296,23 @@ class ImageEditorCropViewController: OWSViewController {
return true
}
// MARK: - Gestures
/// Derives a new unit translation for the transform from a drag between two
/// view-coordinate locations.
///
/// With an SRT (scale-rotate-translation) transform ordering, translation is
/// applied last, so a delta in view coordinates maps directly onto the
/// transform's translation. Our (view bounds == canvas bounds), so no
/// further conversion is needed.
private class func unitTranslation(oldLocationView: CGPoint,
                                   newLocationView: CGPoint,
                                   viewBounds: CGRect,
                                   oldTransform: ImageEditorTransform) -> CGPoint {
    let viewDelta = newLocationView.minus(oldLocationView)
    // Normalize the delta against the viewport so it composes with the
    // transform's unit translation.
    let unitDelta = viewDelta.toUnitCoordinates(viewSize: viewBounds.size, shouldClamp: false)
    return oldTransform.unitTranslation.plus(unitDelta)
}
// MARK: - Pinch Gesture
@objc
@ -315,12 +332,10 @@ class ImageEditorCropViewController: OWSViewController {
return
}
let locationUnitStart = self.locationUnit(forLocationInView: gestureRecognizer.pinchStateStart.centroid,
transform: gestureStartTransform)
let locationUnitLast = self.locationUnit(forLocationInView: gestureRecognizer.pinchStateLast.centroid,
transform: gestureStartTransform)
let locationUnitDelta = CGPointSubtract(locationUnitLast, locationUnitStart)
let newUnitTranslation = CGPointAdd(gestureStartTransform.unitTranslation, locationUnitDelta)
let newUnitTranslation = ImageEditorCropViewController.unitTranslation(oldLocationView: gestureRecognizer.pinchStateStart.centroid,
newLocationView: gestureRecognizer.pinchStateLast.centroid,
viewBounds: clipView.bounds,
oldTransform: gestureStartTransform)
let newRotationRadians = gestureStartTransform.rotationRadians + gestureRecognizer.pinchStateLast.angleRadians - gestureRecognizer.pinchStateStart.angleRadians
@ -516,16 +531,16 @@ class ImageEditorCropViewController: OWSViewController {
owsFailDebug("Missing pinchTransform.")
return
}
guard let locationStart = gestureRecognizer.locationStart else {
guard let oldLocationView = gestureRecognizer.locationStart else {
owsFailDebug("Missing locationStart.")
return
}
let locationNow = gestureRecognizer.location(in: self.clipView)
let locationUnitStart = self.locationUnit(forLocationInView: locationStart, transform: gestureStartTransform)
let locationUnitNow = self.locationUnit(forLocationInView: locationNow, transform: gestureStartTransform)
let locationUnitDelta = CGPointSubtract(locationUnitNow, locationUnitStart)
let newUnitTranslation = CGPointAdd(gestureStartTransform.unitTranslation, locationUnitDelta)
let newLocationView = gestureRecognizer.location(in: self.clipView)
let newUnitTranslation = ImageEditorCropViewController.unitTranslation(oldLocationView: oldLocationView,
newLocationView: newLocationView,
viewBounds: clipView.bounds,
oldTransform: gestureStartTransform)
updateTransform(ImageEditorTransform(outputSizePixels: gestureStartTransform.outputSizePixels,
unitTranslation: newUnitTranslation,
@ -573,18 +588,6 @@ class ImageEditorCropViewController: OWSViewController {
}
}
// MARK: - Coordinates
/// Converts a gesture's location in the clip view into unit view coordinates.
private func locationUnit(forGestureRecognizer gestureRecognizer: UIGestureRecognizer,
                          transform: ImageEditorTransform) -> CGPoint {
    // Forward to the canvas view's class-level helper.
    return ImageEditorCanvasView.locationUnit(forGestureRecognizer: gestureRecognizer,
                                              view: clipView,
                                              transform: transform)
}
/// Converts a point in view coordinates into unit view coordinates.
private func locationUnit(forLocationInView locationInView: CGPoint,
                          transform: ImageEditorTransform) -> CGPoint {
    // Forward to the canvas view's class-level helper, sized by the clip view.
    return ImageEditorCanvasView.locationUnit(forLocationInView: locationInView,
                                              viewSize: clipView.bounds.size,
                                              transform: transform)
}
// MARK: - Events
@objc public func didTapBackButton() {
@ -600,17 +603,19 @@ class ImageEditorCropViewController: OWSViewController {
}
@objc public func rotate90ButtonPressed() {
    // Rotating by 90° swaps the output's aspect ratio, so the canvas is
    // rotated as well (rotateCanvas: true).
    //
    // NOTE(review): the original text contained both the pre- and post-change
    // call (a diff artifact), which would have rotated twice per tap; only the
    // updated call is kept.
    rotateButtonPressed(angleRadians: CGFloat.pi * 0.5, rotateCanvas: true)
}
@objc public func rotate45ButtonPressed() {
    // A 45° rotation does not swap the output's width/height, so the canvas
    // itself is not rotated (rotateCanvas: false).
    //
    // NOTE(review): the original text contained both the pre- and post-change
    // call (a diff artifact), which would have rotated twice per tap; only the
    // updated call is kept.
    rotateButtonPressed(angleRadians: CGFloat.pi * 0.25, rotateCanvas: false)
}
private func rotateButtonPressed(angleRadians: CGFloat) {
// Invert width and height.
let outputSizePixels = CGSize(width: transform.outputSizePixels.height,
height: transform.outputSizePixels.width)
private func rotateButtonPressed(angleRadians: CGFloat, rotateCanvas: Bool) {
let outputSizePixels = (rotateCanvas
// Invert width and height.
? CGSize(width: transform.outputSizePixels.height,
height: transform.outputSizePixels.width)
: transform.outputSizePixels)
let unitTranslation = transform.unitTranslation
let rotationRadians = transform.rotationRadians + angleRadians
let scaling = transform.scaling

@ -4,11 +4,63 @@
import UIKit
// The image editor uses multiple coordinate systems.
//
// * Image unit coordinates. Brush stroke and text content should be pegged to
// image content, so they are specified relative to the bounds of the image.
// * Canvas coordinates. We render the image, strokes and text into the "canvas",
// a viewport that has the aspect ratio of the view. Rendering is transformed, so
// this is pre-transform.
// * View coordinates. The coordinates of the actual view (or rendered output).
// Bounded by the view's bounds / viewport.
//
// Sometimes we use unit coordinates. This facilitates a number of operations such
// as clamping to 0-1, etc. So in practice almost all values will be in one of six
// coordinate systems:
//
// * unit image coordinates
// * image coordinates
// * unit canvas coordinates
// * canvas coordinates
// * unit view coordinates
// * view coordinates
//
// For simplicity, the canvas bounds are always identical to view bounds.
// If we wanted to manipulate output quality, we would use the layer's "scale".
// But canvas values are pre-transform and view values are post-transform so they
// are only identical if the transform has no scaling, rotation or translation.
//
// The "ImageEditorTransform" can be used to generate a CGAffineTransform
// for the layers used to render the content. In practice, the affine transform
// is applied to a superlayer of the sublayers used to render content.
//
// CALayers apply their transform relative to the layer's anchorPoint, which
// by default is the center of the layer's bounds. E.g. rotation occurs
// around the center of the layer. Therefore when projecting absolute
// (but not relative) coordinates between the "view" and "canvas" coordinate
// systems, it's necessary to project them relative to the center of the
// view/canvas.
//
// To simplify our representation & operations, the default size of the image
// content is "exactly large enough to fill the canvas if rotation
// but not scaling or translation were applied". This might seem unusual,
// but we have a key invariant: we always want the image to fill the canvas.
// It's far easier to ensure this if the transform is always (just barely)
// valid when scaling = 1 and translation = .zero. The image size that
// fulfills this criteria is calculated using
// ImageEditorCanvasView.imageFrame(forViewSize:...). Transforming between
// the "image" and "canvas" coordinate systems is done with that image frame.
@objc
public class ImageEditorTransform: NSObject {
// The outputSizePixels is used to specify the aspect ratio and size of the
// output.
public let outputSizePixels: CGSize
// The unit translation of the content, relative to the
// canvas viewport.
public let unitTranslation: CGPoint
// Rotation about the center of the content.
public let rotationRadians: CGFloat
// x >= 1.0.
public let scaling: CGFloat
public init(outputSizePixels: CGSize,

@ -299,7 +299,7 @@ public class ImageEditorView: UIView {
}
let location = gestureRecognizer.location(in: canvasView.gestureReferenceView)
guard let textLayer = canvasView.textLayer(forLocation: location) else {
guard let textLayer = self.textLayer(forLocation: location) else {
return
}
@ -326,7 +326,7 @@ public class ImageEditorView: UIView {
switch gestureRecognizer.state {
case .began:
let pinchState = gestureRecognizer.pinchStateStart
guard let textLayer = canvasView.textLayer(forLocation: pinchState.centroid) else {
guard let textLayer = self.textLayer(forLocation: pinchState.centroid) else {
// The pinch needs to start centered on a text item.
return
}
@ -341,14 +341,20 @@ public class ImageEditorView: UIView {
return
}
let view = self.canvasView.gestureReferenceView
let viewBounds = view.bounds
let locationStart = gestureRecognizer.pinchStateStart.centroid
let locationUnitStart = locationUnit(forLocationInView: locationStart, transform: model.currentTransform())
let locationNow = gestureRecognizer.pinchStateLast.centroid
let locationUnitNow = locationUnit(forLocationInView: locationNow, transform: model.currentTransform())
let unitLocationDelta = CGPointSubtract(locationUnitNow,
locationUnitStart)
let unitCenter = CGPointClamp01(CGPointAdd(textItem.unitCenter, unitLocationDelta))
let gestureStartImageUnit = ImageEditorView.locationImageUnit(forLocationInView: locationStart,
viewBounds: viewBounds,
model: self.model,
transform: self.model.currentTransform())
let gestureNowImageUnit = ImageEditorView.locationImageUnit(forLocationInView: locationNow,
viewBounds: viewBounds,
model: self.model,
transform: self.model.currentTransform())
let gestureDeltaImageUnit = gestureNowImageUnit.minus(gestureStartImageUnit)
let unitCenter = CGPointClamp01(textItem.unitCenter.plus(gestureDeltaImageUnit))
// NOTE: We use max(1, ...) to avoid divide-by-zero.
let newScaling = CGFloatClamp(textItem.scaling * gestureRecognizer.pinchStateLast.distance / max(1.0, gestureRecognizer.pinchStateStart.distance),
@ -383,6 +389,13 @@ public class ImageEditorView: UIView {
private var movingTextStartUnitCenter: CGPoint?
private var movingTextHasMoved = false
/// Finds the text layer (if any) under a location given in view coordinates.
///
/// The touch is projected back into canvas coordinates before hit-testing,
/// since the canvas content is rendered through the current transform.
private func textLayer(forLocation locationInView: CGPoint) -> EditorTextLayer? {
    let referenceBounds = self.canvasView.gestureReferenceView.bounds
    let affineTransform = self.model.currentTransform().affineTransform(viewSize: referenceBounds.size)
    // Invert the transform about the view center — CALayer transforms are
    // applied relative to the layer's anchor point (its center by default).
    let center = referenceBounds.center
    let untransformed = locationInView.minus(center).applyingInverse(affineTransform)
    let locationInCanvas = untransformed.plus(center)
    return canvasView.textLayer(forLocation: locationInCanvas)
}
@objc
public func handleMoveTextGesture(_ gestureRecognizer: ImageEditorPanGestureRecognizer) {
AssertIsOnMainThread()
@ -395,7 +408,7 @@ public class ImageEditorView: UIView {
owsFailDebug("Missing locationStart.")
return
}
guard let textLayer = canvasView.textLayer(forLocation: locationStart) else {
guard let textLayer = self.textLayer(forLocation: locationStart) else {
owsFailDebug("No text layer")
return
}
@ -420,12 +433,19 @@ public class ImageEditorView: UIView {
return
}
let locationUnitStart = canvasView.locationUnit(forLocationInView: locationStart, transform: model.currentTransform())
let locationNow = gestureRecognizer.location(in: canvasView.gestureReferenceView)
let locationUnitNow = canvasView.locationUnit(forLocationInView: locationNow, transform: model.currentTransform())
let unitLocationDelta = CGPointSubtract(locationUnitNow, locationUnitStart)
let unitCenter = CGPointClamp01(CGPointAdd(movingTextStartUnitCenter, unitLocationDelta))
let view = self.canvasView.gestureReferenceView
let viewBounds = view.bounds
let locationInView = gestureRecognizer.location(in: view)
let gestureStartImageUnit = ImageEditorView.locationImageUnit(forLocationInView: locationStart,
viewBounds: viewBounds,
model: self.model,
transform: self.model.currentTransform())
let gestureNowImageUnit = ImageEditorView.locationImageUnit(forLocationInView: locationInView,
viewBounds: viewBounds,
model: self.model,
transform: self.model.currentTransform())
let gestureDeltaImageUnit = gestureNowImageUnit.minus(gestureStartImageUnit)
let unitCenter = CGPointClamp01(movingTextStartUnitCenter.plus(gestureDeltaImageUnit))
let newItem = textItem.copy(withUnitCenter: unitCenter)
if movingTextHasMoved {
@ -461,7 +481,14 @@ public class ImageEditorView: UIView {
self.currentStrokeSamples.removeAll()
}
let tryToAppendStrokeSample = {
let newSample = self.locationUnit(forGestureRecognizer: gestureRecognizer, transform: self.model.currentTransform())
let view = self.canvasView.gestureReferenceView
let viewBounds = view.bounds
let locationInView = gestureRecognizer.location(in: view)
let newSample = ImageEditorView.locationImageUnit(forLocationInView: locationInView,
viewBounds: viewBounds,
model: self.model,
transform: self.model.currentTransform())
if let prevSample = self.currentStrokeSamples.last,
prevSample == newSample {
// Ignore duplicate samples.
@ -511,14 +538,15 @@ public class ImageEditorView: UIView {
// MARK: - Coordinates
private func locationUnit(forGestureRecognizer gestureRecognizer: UIGestureRecognizer,
transform: ImageEditorTransform) -> CGPoint {
return canvasView.locationUnit(forGestureRecognizer: gestureRecognizer, transform: transform)
}
private func locationUnit(forLocationInView locationInView: CGPoint,
transform: ImageEditorTransform) -> CGPoint {
return canvasView.locationUnit(forLocationInView: locationInView, transform: transform)
/// Projects a point in view coordinates into "image unit" coordinates.
///
/// The point is first mapped back into canvas coordinates by inverting the
/// current transform about the view's center (matching CALayer's
/// center-anchored transforms), then expressed as a unit position within the
/// image's frame on the canvas (unclamped).
private class func locationImageUnit(forLocationInView locationInView: CGPoint,
                                     viewBounds: CGRect,
                                     model: ImageEditorModel,
                                     transform: ImageEditorTransform) -> CGPoint {
    // The image frame is the image's bounds expressed in canvas coordinates.
    let imageFrame = ImageEditorCanvasView.imageFrame(forViewSize: viewBounds.size,
                                                      imageSize: model.srcImageSizePixels,
                                                      transform: transform)
    let affineTransform = transform.affineTransform(viewSize: viewBounds.size)
    let viewCenter = viewBounds.center
    // Undo the view transform relative to the view center.
    let locationInCanvas = locationInView.minus(viewCenter).applyingInverse(affineTransform).plus(viewCenter)
    return locationInCanvas.toUnitCoordinates(viewBounds: imageFrame, shouldClamp: false)
}
// MARK: - Edit Text Tool
@ -586,7 +614,7 @@ extension ImageEditorView: UIGestureRecognizerDelegate {
}
let location = touch.location(in: canvasView.gestureReferenceView)
let isInTextArea = canvasView.textLayer(forLocation: location) != nil
let isInTextArea = self.textLayer(forLocation: location) != nil
return isInTextArea
}
}

@ -149,9 +149,13 @@ public extension CGPoint {
return toUnitCoordinates(viewBounds: CGRect(origin: .zero, size: viewSize), shouldClamp: shouldClamp)
}
/// Maps this point from unit coordinates into `viewBounds`' coordinate space,
/// interpolating across the bounds' size and offsetting by its origin.
public func fromUnitCoordinates(viewBounds: CGRect) -> CGPoint {
    let projectedX = viewBounds.origin.x + x.lerp(0, viewBounds.size.width)
    let projectedY = viewBounds.origin.y + y.lerp(0, viewBounds.size.height)
    return CGPoint(x: projectedX, y: projectedY)
}
/// Maps this point from unit coordinates into a view of the given size
/// (origin assumed at .zero).
///
/// NOTE(review): the original text contained both the pre- and post-change
/// bodies (a diff artifact), leaving an unreachable duplicate `return`; only
/// the updated delegating body is kept.
public func fromUnitCoordinates(viewSize: CGSize) -> CGPoint {
    return fromUnitCoordinates(viewBounds: CGRect(origin: .zero, size: viewSize))
}
public func inverse() -> CGPoint {

Loading…
Cancel
Save