From e2f63bdd03bdbb3485d41d7bc267cf652df6e72f Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Mon, 30 Oct 2023 16:28:18 -0700 Subject: [PATCH 01/12] Shitty offset curves for GPU stroke scaffolding Implemented a stroking scheme in the flatten stage in which every flattened line segment gets offset along the curve normal by the line width. The line vertices are offset _after_ subdivision and does not try to accurately estimate subdivisions for the offset curve. Cusps are not detected and are prone to failure. Regions with high curvature (especially around self-intersections) are handled by simply boosting the subdivision count using the curve offset which is not correct but avoids awfully missing samples when the tolerance is set too low. This is intended as a placeholder but is the first step towards implementing correct strokes without optimizing for performance. --- crates/encoding/src/encoding.rs | 14 +++- examples/scenes/src/test_scenes.rs | 5 +- shader/flatten.wgsl | 109 ++++++++++++++++++++++++++--- src/scene.rs | 59 ++++++++++------ 4 files changed, 153 insertions(+), 34 deletions(-) diff --git a/crates/encoding/src/encoding.rs b/crates/encoding/src/encoding.rs index 1eb524dfb..1e16b70cf 100644 --- a/crates/encoding/src/encoding.rs +++ b/crates/encoding/src/encoding.rs @@ -3,7 +3,10 @@ use super::{DrawColor, DrawTag, PathEncoder, PathTag, Style, Transform}; -use peniko::{kurbo::Shape, BlendMode, BrushRef, Color, Fill}; +use peniko::{ + kurbo::{Shape, Stroke}, + BlendMode, BrushRef, Color, Fill, +}; #[cfg(feature = "full")] use { @@ -172,6 +175,15 @@ impl Encoding { } } + /// Encodes a stroke style. + pub fn encode_stroke_style(&mut self, stroke: &Stroke) { + let style = Style::from_stroke(stroke); + if self.styles.last() != Some(&style) { + self.path_tags.push(PathTag::STYLE); + self.styles.push(style); + } + } + /// Encodes a transform. 
/// /// If the given transform is different from the current one, encodes it and diff --git a/examples/scenes/src/test_scenes.rs b/examples/scenes/src/test_scenes.rs index f12ff17ea..cf649f4d0 100644 --- a/examples/scenes/src/test_scenes.rs +++ b/examples/scenes/src/test_scenes.rs @@ -106,12 +106,13 @@ fn stroke_styles(sb: &mut SceneBuilder, params: &mut SceneParams) { Color::rgb8(201, 147, 206), Color::rgb8(150, 195, 160), ]; - let simple_stroke = [LineTo((100., 0.).into())]; + let simple_stroke = [MoveTo((0., 0.).into()), LineTo((100., 0.).into())]; let join_stroke = [ + MoveTo((0., 0.).into()), CurveTo((20., 0.).into(), (42.5, 5.).into(), (50., 25.).into()), CurveTo((57.5, 5.).into(), (80., 0.).into(), (100., 0.).into()), ]; - let miter_stroke = [LineTo((90., 21.).into()), LineTo((0., 42.).into())]; + let miter_stroke = [MoveTo((0., 0.).into()), LineTo((90., 21.).into()), LineTo((0., 42.).into())]; let cap_styles = [Cap::Butt, Cap::Square, Cap::Round]; let join_styles = [Join::Bevel, Join::Miter, Join::Round]; let miter_limits = [4., 5., 0.1, 10.]; diff --git a/shader/flatten.wgsl b/shader/flatten.wgsl index e9f516eaa..f65e9ae19 100644 --- a/shader/flatten.wgsl +++ b/shader/flatten.wgsl @@ -89,6 +89,55 @@ fn eval_cubic(p0: vec2, p1: vec2, p2: vec2, p3: vec2, t: f32 return p0 * (mt * mt * mt) + (p1 * (mt * mt * 3.0) + (p2 * (mt * 3.0) + p3 * t) * t) * t; } +fn eval_cubic_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2, t: f32) -> vec2 { + let dp0 = 3. * (p1 - p0); + let dp1 = 3. * (p2 - p1); + let dp2 = 3. * (p3 - p2); + return eval_quad(dp0, dp1, dp2, t); +} + +fn eval_cubic_normal(p0: vec2, p1: vec2, p2: vec2, p3: vec2, t: f32) -> vec2 { + let tangent = eval_cubic_tangent(p0, p1, p2, p3, t); + return vec2(-tangent.y, tangent.x); +} + +fn eval_quad_tangent(p0: vec2, p1: vec2, p2: vec2, t: f32) -> vec2 { + let dp0 = 2. * (p1 - p0); + let dp1 = 2. 
* (p2 - p1); + return mix(dp0, dp1, t); +} + +fn eval_quad_normal(p0: vec2, p1: vec2, p2: vec2, t: f32) -> vec2 { + let tangent = eval_quad_tangent(p0, p1, p2, t); + return vec2(-tangent.y, tangent.x); +} + +fn cubic_start_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { + let EPS = 1e-12; + let d01 = p1 - p0; + let d02 = p2 - p0; + let d03 = p3 - p0; + return select(select(d03, d02, dot(d02, d02) > EPS), d01, dot(d01, d01) > EPS); +} + +fn cubic_end_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { + let EPS = 1e-12; + let d23 = p3 - p2; + let d13 = p3 - p1; + let d03 = p3 - p0; + return select(select(d03, d13, dot(d13, d13) > EPS), d23, dot(d23, d23) > EPS); +} + +fn cubic_start_normal(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { + let tangent = cubic_start_tangent(p0, p1, p2, p3); + return vec2(-tangent.y, tangent.x); +} + +fn cubic_end_normal(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { + let tangent = cubic_end_tangent(p0, p1, p2, p3); + return vec2(-tangent.y, tangent.x); +} + let MAX_QUADS = 16u; fn flatten_cubic(cubic: Cubic) { @@ -113,11 +162,22 @@ fn flatten_cubic(cubic: Cubic) { let qp2 = eval_cubic(p0, p1, p2, p3, t); var qp1 = eval_cubic(p0, p1, p2, p3, t - 0.5 * step); qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2); - let params = estimate_subdiv(qp0, qp1, qp2, sqrt(REM_ACCURACY)); + + // HACK: this increase subdivision count as function of the stroke width for shitty strokes. + var tol = sqrt(REM_ACCURACY); + if cubic.flags == 1u { + tol *= min(1000., dot(cubic.stroke, cubic.stroke)); + } + let params = estimate_subdiv(qp0, qp1, qp2, tol); keep_params[i] = params; val += params.val; qp0 = qp2; } + + // HACK: normal vector used to offset line segments for shitty stroke handling. 
+ var n0 = cubic_start_normal(p0, p1, p2, p3); + n0 = normalize(n0) * cubic.stroke; + let n = max(u32(ceil(val * (0.5 / sqrt(REM_ACCURACY)))), 1u); var lp0 = p0; qp0 = p0; @@ -129,6 +189,7 @@ fn flatten_cubic(cubic: Cubic) { let qp2 = eval_cubic(p0, p1, p2, p3, t); var qp1 = eval_cubic(p0, p1, p2, p3, t - 0.5 * step); qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2); + let qp0_normal = eval_quad_normal(qp0, qp1, qp2, 0.); let params = keep_params[i]; let u0 = approx_parabola_inv_integral(params.a0); let u2 = approx_parabola_inv_integral(params.a2); @@ -136,20 +197,37 @@ fn flatten_cubic(cubic: Cubic) { var val_target = f32(n_out) * v_step; while n_out == n || val_target < val_sum + params.val { var lp1: vec2; + var t1: f32; if n_out == n { lp1 = p3; + t1 = 1.; } else { let u = (val_target - val_sum) / params.val; let a = mix(params.a0, params.a2, u); let au = approx_parabola_inv_integral(a); let t = (au - u0) * uscale; + t1 = t; lp1 = eval_quad(qp0, qp1, qp2, t); } - // Output line segment lp0..lp1 - let line_ix = atomicAdd(&bump.lines, 1u); - // TODO: check failure - lines[line_ix] = LineSoup(cubic.path_ix, lp0, lp1); + if cubic.flags == 1u { + var n1: vec2f; + if all(lp1 == p3) { + n1 = cubic_end_normal(p0, p1, p2, p3); + } else { + n1 = eval_quad_normal(qp0, qp1, qp2, t1); + } + n1 = normalize(n1) * cubic.stroke; + let line_ix = atomicAdd(&bump.lines, 2u); + lines[line_ix] = LineSoup(cubic.path_ix, lp0 + n0, lp1 + n1); + lines[line_ix + 1u] = LineSoup(cubic.path_ix, lp1 - n1, lp0 - n0); + n0 = n1; + } else { + // Output line segment lp0..lp1 + let line_ix = atomicAdd(&bump.lines, 1u); + // TODO: check failure + lines[line_ix] = LineSoup(cubic.path_ix, lp0, lp1); + } n_out += 1u; val_target += v_step; lp0 = lp1; @@ -220,8 +298,7 @@ fn main( let out = &path_bboxes[tm.path_ix]; let style_flags = scene[config.style_base + tm.style_ix]; - // TODO: We assume all paths are fills at the moment. This is where we will extract the stroke - // vs fill state using STYLE_FLAGS_STYLE_BIT. 
+ // The fill bit is always set to 0 for strokes which represents a non-zero fill. let draw_flags = select(DRAW_INFO_FLAGS_FILL_RULE_BIT, 0u, (style_flags & STYLE_FLAGS_FILL_BIT) == 0u); if (tag_byte & PATH_TAG_PATH) != 0u { (*out).draw_flags = draw_flags; @@ -276,17 +353,27 @@ fn main( } var stroke = vec2(0.0, 0.0); let is_stroke = (style_flags & STYLE_FLAGS_STYLE_BIT) != 0u; - /* - // TODO: the stroke handling here is dead code for now if is_stroke { + // TODO: WIP + let linewidth = bitcast(scene[config.style_base + tm.style_ix + 1u]); // See https://www.iquilezles.org/www/articles/ellipses/ellipses.htm // This is the correct bounding box, but we're not handling rendering // in the isotropic case, so it may mismatch. stroke = 0.5 * linewidth * vec2(length(transform.mat.xz), length(transform.mat.yw)); bbox += vec4(-stroke, stroke); + + flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke))); + + // TODO: proper caps + let n0 = normalize(cubic_start_normal(p0, p1, p2, p3)) * stroke; + let n1 = normalize(cubic_end_normal(p0, p1, p2, p3)) * stroke; + + let line_ix = atomicAdd(&bump.lines, 2u); + lines[line_ix] = LineSoup(tm.path_ix, p0 - n0, p0 + n0); + lines[line_ix + 1u] = LineSoup(tm.path_ix, p3 + n1, p3 - n1); + } else { + flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke))); } - */ - flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke))); // Update bounding box using atomics only. Computing a monoid is a // potential future optimization. if bbox.z > bbox.x || bbox.w > bbox.y { diff --git a/src/scene.rs b/src/scene.rs index 13c61ca1a..0e42ccf1f 100644 --- a/src/scene.rs +++ b/src/scene.rs @@ -149,26 +149,45 @@ impl<'a> SceneBuilder<'a> { brush_transform: Option, shape: &impl Shape, ) { - // The setting for tolerance are a compromise. For most applications, - // shape tolerance doesn't matter, as the input is likely Bézier paths, - // which is exact. 
Note that shape tolerance is hard-coded as 0.1 in - // the encoding crate. - // - // Stroke tolerance is a different matter. Generally, the cost scales - // with inverse O(n^6), so there is moderate rendering cost to setting - // too fine a value. On the other hand, error scales with the transform - // applied post-stroking, so may exceed visible threshold. When we do - // GPU-side stroking, the transform will be known. In the meantime, - // this is a compromise. - const SHAPE_TOLERANCE: f64 = 0.01; - const STROKE_TOLERANCE: f64 = SHAPE_TOLERANCE; - let stroked = peniko::kurbo::stroke( - shape.path_elements(SHAPE_TOLERANCE), - style, - &Default::default(), - STROKE_TOLERANCE, - ); - self.fill(Fill::NonZero, transform, brush, brush_transform, &stroked); + const GPU_STROKES: bool = true; + if GPU_STROKES { + // TODO: handle dashing by using a DashIterator + self.scene + .encode_transform(Transform::from_kurbo(&transform)); + self.scene.encode_stroke_style(style); + if self.scene.encode_shape(shape, false) { + if let Some(brush_transform) = brush_transform { + if self + .scene + .encode_transform(Transform::from_kurbo(&(transform * brush_transform))) + { + self.scene.swap_last_path_tags(); + } + } + self.scene.encode_brush(brush, 1.0); + } + } else { + // The setting for tolerance are a compromise. For most applications, + // shape tolerance doesn't matter, as the input is likely Bézier paths, + // which is exact. Note that shape tolerance is hard-coded as 0.1 in + // the encoding crate. + // + // Stroke tolerance is a different matter. Generally, the cost scales + // with inverse O(n^6), so there is moderate rendering cost to setting + // too fine a value. On the other hand, error scales with the transform + // applied post-stroking, so may exceed visible threshold. When we do + // GPU-side stroking, the transform will be known. In the meantime, + // this is a compromise. 
+ const SHAPE_TOLERANCE: f64 = 0.01; + const STROKE_TOLERANCE: f64 = SHAPE_TOLERANCE; + let stroked = peniko::kurbo::stroke( + shape.path_elements(SHAPE_TOLERANCE), + style, + &Default::default(), + STROKE_TOLERANCE, + ); + self.fill(Fill::NonZero, transform, brush, brush_transform, &stroked); + } } /// Draws an image at its natural size with the given transform. From f72a066a830acdbc69baeabd37c03a2c85c59fa9 Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Tue, 7 Nov 2023 12:46:59 -0800 Subject: [PATCH 02/12] Add closed paths to the stroke styles test scene --- examples/scenes/src/test_scenes.rs | 39 +++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/examples/scenes/src/test_scenes.rs b/examples/scenes/src/test_scenes.rs index cf649f4d0..14597d774 100644 --- a/examples/scenes/src/test_scenes.rs +++ b/examples/scenes/src/test_scenes.rs @@ -112,7 +112,20 @@ fn stroke_styles(sb: &mut SceneBuilder, params: &mut SceneParams) { CurveTo((20., 0.).into(), (42.5, 5.).into(), (50., 25.).into()), CurveTo((57.5, 5.).into(), (80., 0.).into(), (100., 0.).into()), ]; - let miter_stroke = [MoveTo((0., 0.).into()), LineTo((90., 21.).into()), LineTo((0., 42.).into())]; + let miter_stroke = [ + MoveTo((0., 0.).into()), + LineTo((90., 21.).into()), + LineTo((0., 42.).into()), + ]; + let closed_strokes = [ + MoveTo((0., 0.).into()), + LineTo((90., 21.).into()), + LineTo((0., 42.).into()), + ClosePath, + MoveTo((200., 0.).into()), + CurveTo((100., 42.).into(), (300., 42.).into(), (200., 0.).into()), + ClosePath, + ]; let cap_styles = [Cap::Butt, Cap::Square, Cap::Round]; let join_styles = [Join::Bevel, Join::Miter, Join::Round]; let miter_limits = [4., 5., 0.1, 10.]; @@ -193,6 +206,30 @@ fn stroke_styles(sb: &mut SceneBuilder, params: &mut SceneParams) { y += 180.; color_idx = (color_idx + 1) % colors.len(); } + + // Closed paths + let t = Affine::translate((500., 0.)) * t; + y = 0.; + for (i, join) in join_styles.iter().enumerate() { + 
params.text.add( + sb, + None, + 12., + None, + Affine::translate((0., y)) * t, + &format!("Closed path with join: {:?}", join), + ); + // The cap style is not important since a closed path shouldn't have any caps. + sb.stroke( + &Stroke::new(10.).with_caps(cap_styles[i]).with_join(*join), + Affine::translate((0., y + 30.)) * t, + colors[color_idx], + None, + &closed_strokes, + ); + y += 180.; + color_idx = (color_idx + 1) % colors.len(); + } } // This test has been adapted from Skia's "trickycubicstrokes" GM slide which can be found at From 08fc286a3d10bbd770c1941c1db588b07a85037a Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Tue, 7 Nov 2023 13:19:16 -0800 Subject: [PATCH 03/12] [encoding] Implement stroke cap marker segment encoding - Implemented the stroke cap marker segment encoding scheme outlined in #303. The proposed scheme has been slightly modified such that instead of an open/closed bit, the segment is encoded as a lineto or quadto to distinguish between the different states. Encoding the marker segment as a quad to for open segments also handles a limitation with only using linetos: for an open path, the marker must encode both the start tangent and the coordinates of the first control point to correctly position a start cap. Encoding these as 4 f32s requires an implicit moveto before the lineto which must terminate the preceding subpath. Encoding this as a quadto preserves the invariant that every stroked subpath only contains one segment with the SUBPATH_END_BIT set to 1, which now serves as the stroke cap marker. For a closed path, a lineto is sufficient since the preceding coordinate (from the lineto encoded for a "closepath" verb) must already equal the subpath's first control point. - Shitty stroking continues to work by detecting and ignoring the stroke cap marker. 
--- crates/encoding/src/path.rs | 94 ++++++++++++++++++++++++++++++++++--- shader/flatten.wgsl | 35 +++++++------- shader/shared/pathtag.wgsl | 1 + 3 files changed, 107 insertions(+), 23 deletions(-) diff --git a/crates/encoding/src/path.rs b/crates/encoding/src/path.rs index 5676398fc..0d943dcde 100644 --- a/crates/encoding/src/path.rs +++ b/crates/encoding/src/path.rs @@ -418,6 +418,7 @@ pub struct PathEncoder<'a> { n_segments: &'a mut u32, n_paths: &'a mut u32, first_point: [f32; 2], + first_start_tangent_end: [f32; 2], state: PathState, n_encoded_segments: u32, is_fill: bool, @@ -431,8 +432,42 @@ enum PathState { } impl<'a> PathEncoder<'a> { - /// Creates a new path encoder for the specified path tags and data. If `is_fill` is true, - /// ensures that all subpaths are closed. + /// Creates a new path encoder for the specified path tags and data. + /// + /// If `is_fill` is true, ensures that all subpaths are closed. Otherwise, the path is treated + /// as a stroke and an additional "stroke cap marker" segment is inserted at the end of every + /// subpath. + /// + /// Stroke Encoding + /// --------------- + /// Every subpath within a stroked path is terminated with a "stroke cap marker" segment. This + /// segment tells the GPU stroker whether to draw a cap or a join based on the topology of the + /// path: + /// + /// 1. This marker segment is encoded as a `quad-to` for an open path and a `line-to` for a + /// closed path. An open path gets drawn with a start and end cap. A closed path gets drawn + /// with a single join in place of the caps where the subpath's start and end control points + /// meet. + /// + /// 2. The marker segment tells the GPU flattening stage how to render caps and joins while + /// processing each path segment in parallel. All subpaths end with the marker segment which + /// is the only segment that has the `SUBPATH_END_BIT` set to 1. 
+ /// + /// The algorithm is as follows: + /// + /// a) If a GPU thread is processing a regular segment (i.e. `SUBPATH_END_BIT` is 0), it + /// outputs the offset curves for the segment. If the segment is immediately followed by + /// the marker segment, then the same thread draws an end cap if the subpath is open + /// (i.e. the marker is a quad-to) or a join if the subpath is closed (i.e. the marker is + /// a line-to) using the tangent encoded in the marker segment. + /// + /// If the segment is immediately followed by another regular segment, then the thread + /// draws a join using the start tangent of the neighboring segment. + /// + /// b) If a GPU thread is processing the marker segment (i.e. `SUBPATH_END_BIT` is 1), then + /// it draws a start cap using the information encoded in the segment IF the subpath is + /// open (i.e. the marker is a quad-to). If the subpath is closed (i.e. the marker is a + /// line-to), the thread draws nothing. pub fn new( tags: &'a mut Vec, data: &'a mut Vec, @@ -446,6 +481,7 @@ impl<'a> PathEncoder<'a> { n_segments, n_paths, first_point: [0.0, 0.0], + first_start_tangent_end: [0.0, 0.0], state: PathState::Start, n_encoded_segments: 0, is_fill, @@ -459,15 +495,18 @@ impl<'a> PathEncoder<'a> { } let buf = [x, y]; let bytes = bytemuck::bytes_of(&buf); - self.first_point = buf; if self.state == PathState::MoveTo { let new_len = self.data.len() - 8; self.data.truncate(new_len); } else if self.state == PathState::NonemptySubpath { + if !self.is_fill { + self.insert_stroke_cap_marker_segment(false); + } if let Some(tag) = self.tags.last_mut() { tag.set_subpath_end(); } } + self.first_point = buf; self.data.extend_from_slice(bytes); self.state = PathState::MoveTo; } @@ -483,6 +522,10 @@ impl<'a> PathEncoder<'a> { } self.move_to(self.first_point[0], self.first_point[1]); } + if self.state == PathState::MoveTo { + // TODO: Drop the segment if its length is zero + self.first_start_tangent_end = [x, y]; + } let buf = [x, y]; let bytes = 
bytemuck::bytes_of(&buf); self.data.extend_from_slice(bytes); @@ -500,6 +543,11 @@ impl<'a> PathEncoder<'a> { } self.move_to(self.first_point[0], self.first_point[1]); } + if self.state == PathState::MoveTo { + // TODO: Drop the segment if its length is zero + // TODO: Pick (x2, y2) if [(x0, y0), (x1, y1)] has a length of zero + self.first_start_tangent_end = [x1, y1]; + } let buf = [x1, y1, x2, y2]; let bytes = bytemuck::bytes_of(&buf); self.data.extend_from_slice(bytes); @@ -517,6 +565,12 @@ impl<'a> PathEncoder<'a> { } self.move_to(self.first_point[0], self.first_point[1]); } + if self.state == PathState::MoveTo { + // TODO: Drop the segment if its length is zero + // TODO: Pick (x2, y2) if [(x0, y0), (x1, y1)] has a length of zero + // Pick (x3, y3) if [(x0, y0), (x2, y2)] has a length of zero + self.first_start_tangent_end = [x1, y1]; + } let buf = [x1, y1, x2, y2, x3, y3]; let bytes = bytemuck::bytes_of(&buf); self.data.extend_from_slice(bytes); @@ -545,11 +599,13 @@ impl<'a> PathEncoder<'a> { let first_bytes = bytemuck::bytes_of(&self.first_point); if &self.data[len - 8..len] != first_bytes { self.data.extend_from_slice(first_bytes); - let mut tag = PathTag::LINE_TO_F32; - tag.set_subpath_end(); - self.tags.push(tag); + self.tags.push(PathTag::LINE_TO_F32); self.n_encoded_segments += 1; - } else if let Some(tag) = self.tags.last_mut() { + } + if !self.is_fill { + self.insert_stroke_cap_marker_segment(true); + } + if let Some(tag) = self.tags.last_mut() { tag.set_subpath_end(); } self.state = PathState::Start; @@ -592,6 +648,9 @@ impl<'a> PathEncoder<'a> { self.data.truncate(new_len); } if self.n_encoded_segments != 0 { + if !self.is_fill && self.state == PathState::NonemptySubpath { + self.insert_stroke_cap_marker_segment(false); + } if let Some(tag) = self.tags.last_mut() { tag.set_subpath_end(); } @@ -603,6 +662,27 @@ impl<'a> PathEncoder<'a> { } self.n_encoded_segments } + + fn insert_stroke_cap_marker_segment(&mut self, is_closed: bool) { + 
assert!(!self.is_fill); + assert!(self.state == PathState::NonemptySubpath); + if is_closed { + // We expect that the most recently encoded pair of coordinates in the path data stream + // contain the first control point in the path segment (see `PathEncoder::close`). + // Hence a line-to encoded here should embed the subpath's start tangent. + self.line_to( + self.first_start_tangent_end[0], + self.first_start_tangent_end[1], + ); + } else { + self.quad_to( + self.first_point[0], + self.first_point[1], + self.first_start_tangent_end[0], + self.first_start_tangent_end[1], + ); + } + } } #[cfg(feature = "full")] diff --git a/shader/flatten.wgsl b/shader/flatten.wgsl index f65e9ae19..7ff5a4ab8 100644 --- a/shader/flatten.wgsl +++ b/shader/flatten.wgsl @@ -354,23 +354,26 @@ fn main( var stroke = vec2(0.0, 0.0); let is_stroke = (style_flags & STYLE_FLAGS_STYLE_BIT) != 0u; if is_stroke { - // TODO: WIP - let linewidth = bitcast(scene[config.style_base + tm.style_ix + 1u]); - // See https://www.iquilezles.org/www/articles/ellipses/ellipses.htm - // This is the correct bounding box, but we're not handling rendering - // in the isotropic case, so it may mismatch. - stroke = 0.5 * linewidth * vec2(length(transform.mat.xz), length(transform.mat.yw)); - bbox += vec4(-stroke, stroke); + // TODO: FIX + if (tag_byte & PATH_TAG_SUBPATH_END_BIT) == 0u { + // TODO: WIP + let linewidth = bitcast(scene[config.style_base + tm.style_ix + 1u]); + // See https://www.iquilezles.org/www/articles/ellipses/ellipses.htm + // This is the correct bounding box, but we're not handling rendering + // in the isotropic case, so it may mismatch. 
+ stroke = 0.5 * linewidth * vec2(length(transform.mat.xz), length(transform.mat.yw)); + bbox += vec4(-stroke, stroke); + + flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke))); + + // TODO: proper caps + let n0 = normalize(cubic_start_normal(p0, p1, p2, p3)) * stroke; + let n1 = normalize(cubic_end_normal(p0, p1, p2, p3)) * stroke; - flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke))); - - // TODO: proper caps - let n0 = normalize(cubic_start_normal(p0, p1, p2, p3)) * stroke; - let n1 = normalize(cubic_end_normal(p0, p1, p2, p3)) * stroke; - - let line_ix = atomicAdd(&bump.lines, 2u); - lines[line_ix] = LineSoup(tm.path_ix, p0 - n0, p0 + n0); - lines[line_ix + 1u] = LineSoup(tm.path_ix, p3 + n1, p3 - n1); + let line_ix = atomicAdd(&bump.lines, 2u); + lines[line_ix] = LineSoup(tm.path_ix, p0 - n0, p0 + n0); + lines[line_ix + 1u] = LineSoup(tm.path_ix, p3 + n1, p3 - n1); + } } else { flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke))); } diff --git a/shader/shared/pathtag.wgsl b/shader/shared/pathtag.wgsl index 0aa942d90..37f0235ed 100644 --- a/shader/shared/pathtag.wgsl +++ b/shader/shared/pathtag.wgsl @@ -21,6 +21,7 @@ let PATH_TAG_TRANSFORM = 0x20u; let PATH_TAG_PATH = 0x10u; let PATH_TAG_STYLE = 0x40u; #endif +let PATH_TAG_SUBPATH_END_BIT = 4u; // Size of the `Style` data structure in words let STYLE_SIZE_IN_WORDS: u32 = 2u; From 73dd128c522bebe75763ae238bb2dd6d2a491e73 Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Tue, 7 Nov 2023 14:43:23 -0800 Subject: [PATCH 04/12] Update caps and joins to use the new stroke cap marker encoding The flatten stage now makes use of the stroke cap marker encoding scheme to draw caps and joins at the right locations. At this stage only the butt cap and bevel join styles are supported. 
--- shader/flatten.wgsl | 235 ++++++++++++++++++++++++++++++-------------- 1 file changed, 159 insertions(+), 76 deletions(-) diff --git a/shader/flatten.wgsl b/shader/flatten.wgsl index 7ff5a4ab8..dcd656990 100644 --- a/shader/flatten.wgsl +++ b/shader/flatten.wgsl @@ -282,100 +282,183 @@ fn round_up(x: f32) -> i32 { return i32(ceil(x)); } +struct PathTagData { + tag_byte: u32, + monoid: TagMonoid, +} + +fn compute_tag_monoid(ix: u32) -> PathTagData { + let tag_word = scene[config.pathtag_base + (ix >> 2u)]; + let shift = (ix & 3u) * 8u; + var tm = reduce_tag(tag_word & ((1u << shift) - 1u)); + // TODO: this can be a read buf overflow. Conditionalize by tag byte? + tm = combine_tag_monoid(tag_monoids[ix >> 2u], tm); + var tag_byte = (tag_word >> shift) & 0xffu; + return PathTagData(tag_byte, tm); +} + +struct CubicPoints { + p0: vec2f, + p1: vec2f, + p2: vec2f, + p3: vec2f, +} + +fn read_path_segment(tag: PathTagData, transform: Transform, is_stroke: bool) -> CubicPoints { + var p0: vec2; + var p1: vec2; + var p2: vec2; + var p3: vec2; + + var seg_type = tag.tag_byte & PATH_TAG_SEG_TYPE; + let pathseg_offset = tag.monoid.pathseg_offset; + let is_stroke_cap_marker = is_stroke && (tag.tag_byte & PATH_TAG_SUBPATH_END_BIT) != 0u; + let is_open = seg_type == PATH_TAG_QUADTO; + + if (tag.tag_byte & PATH_TAG_F32) != 0u { + p0 = read_f32_point(pathseg_offset); + p1 = read_f32_point(pathseg_offset + 2u); + if seg_type >= PATH_TAG_QUADTO { + p2 = read_f32_point(pathseg_offset + 4u); + if seg_type == PATH_TAG_CUBICTO { + p3 = read_f32_point(pathseg_offset + 6u); + } + } + } else { + p0 = read_i16_point(pathseg_offset); + p1 = read_i16_point(pathseg_offset + 1u); + if seg_type >= PATH_TAG_QUADTO { + p2 = read_i16_point(pathseg_offset + 2u); + if seg_type == PATH_TAG_CUBICTO { + p3 = read_i16_point(pathseg_offset + 3u); + } + } + } + + if is_stroke_cap_marker && is_open { + // TODO: document + p0 = transform_apply(transform, p1); + p1 = transform_apply(transform, p2); + 
seg_type = PATH_TAG_LINETO; + } else { + p0 = transform_apply(transform, p0); + p1 = transform_apply(transform, p1); + } + + // Degree-raise + if seg_type == PATH_TAG_LINETO { + p3 = p1; + p2 = mix(p3, p0, 1.0 / 3.0); + p1 = mix(p0, p3, 1.0 / 3.0); + } else if seg_type >= PATH_TAG_QUADTO { + p2 = transform_apply(transform, p2); + if seg_type == PATH_TAG_CUBICTO { + p3 = transform_apply(transform, p3); + } else { + p3 = p2; + p2 = mix(p1, p2, 1.0 / 3.0); + p1 = mix(p1, p0, 1.0 / 3.0); + } + } + + return CubicPoints(p0, p1, p2, p3); +} + +struct NeighboringSegment { + do_join: bool, + p0: vec2f, + + // Normalized device-space start tangent vector + tangent: vec2f, +} + +fn read_neighboring_segment(ix: u32) -> NeighboringSegment { + let tag = compute_tag_monoid(ix); + let transform = read_transform(config.transform_base, tag.monoid.trans_ix); + let pts = read_path_segment(tag, transform, true); + + let is_closed = (tag.tag_byte & PATH_TAG_SEG_TYPE) == PATH_TAG_LINETO; + let is_stroke_cap_marker = (tag.tag_byte & PATH_TAG_SUBPATH_END_BIT) != 0u; + let do_join = !is_stroke_cap_marker || is_closed; + let p0 = pts.p0; + let tangent = cubic_start_tangent(pts.p0, pts.p1, pts.p2, pts.p3); + return NeighboringSegment(do_join, p0, normalize(tangent)); +} + @compute @workgroup_size(256) fn main( @builtin(global_invocation_id) global_id: vec3, @builtin(local_invocation_id) local_id: vec3, ) { let ix = global_id.x; - let tag_word = scene[config.pathtag_base + (ix >> 2u)]; pathdata_base = config.pathdata_base; - let shift = (ix & 3u) * 8u; - var tm = reduce_tag(tag_word & ((1u << shift) - 1u)); - // TODO: this can be a read buf overflow. Conditionalize by tag byte? 
- tm = combine_tag_monoid(tag_monoids[ix >> 2u], tm); - var tag_byte = (tag_word >> shift) & 0xffu; - let out = &path_bboxes[tm.path_ix]; - let style_flags = scene[config.style_base + tm.style_ix]; + let tag = compute_tag_monoid(ix); + let path_ix = tag.monoid.path_ix; + let style_ix = tag.monoid.style_ix; + let trans_ix = tag.monoid.trans_ix; + + let out = &path_bboxes[path_ix]; + let style_flags = scene[config.style_base + style_ix]; // The fill bit is always set to 0 for strokes which represents a non-zero fill. let draw_flags = select(DRAW_INFO_FLAGS_FILL_RULE_BIT, 0u, (style_flags & STYLE_FLAGS_FILL_BIT) == 0u); - if (tag_byte & PATH_TAG_PATH) != 0u { + if (tag.tag_byte & PATH_TAG_PATH) != 0u { (*out).draw_flags = draw_flags; - (*out).trans_ix = tm.trans_ix; + (*out).trans_ix = trans_ix; } // Decode path data - let seg_type = tag_byte & PATH_TAG_SEG_TYPE; + let seg_type = tag.tag_byte & PATH_TAG_SEG_TYPE; if seg_type != 0u { - var p0: vec2; - var p1: vec2; - var p2: vec2; - var p3: vec2; - if (tag_byte & PATH_TAG_F32) != 0u { - p0 = read_f32_point(tm.pathseg_offset); - p1 = read_f32_point(tm.pathseg_offset + 2u); - if seg_type >= PATH_TAG_QUADTO { - p2 = read_f32_point(tm.pathseg_offset + 4u); - if seg_type == PATH_TAG_CUBICTO { - p3 = read_f32_point(tm.pathseg_offset + 6u); - } - } - } else { - p0 = read_i16_point(tm.pathseg_offset); - p1 = read_i16_point(tm.pathseg_offset + 1u); - if seg_type >= PATH_TAG_QUADTO { - p2 = read_i16_point(tm.pathseg_offset + 2u); - if seg_type == PATH_TAG_CUBICTO { - p3 = read_i16_point(tm.pathseg_offset + 3u); - } - } - } - let transform = read_transform(config.transform_base, tm.trans_ix); - p0 = transform_apply(transform, p0); - p1 = transform_apply(transform, p1); - var bbox = vec4(min(p0, p1), max(p0, p1)); - // Degree-raise - if seg_type == PATH_TAG_LINETO { - p3 = p1; - p2 = mix(p3, p0, 1.0 / 3.0); - p1 = mix(p0, p3, 1.0 / 3.0); - } else if seg_type >= PATH_TAG_QUADTO { - p2 = transform_apply(transform, p2); - bbox = 
vec4(min(bbox.xy, p2), max(bbox.zw, p2)); - if seg_type == PATH_TAG_CUBICTO { - p3 = transform_apply(transform, p3); - bbox = vec4(min(bbox.xy, p3), max(bbox.zw, p3)); - } else { - p3 = p2; - p2 = mix(p1, p2, 1.0 / 3.0); - p1 = mix(p1, p0, 1.0 / 3.0); - } - } - var stroke = vec2(0.0, 0.0); let is_stroke = (style_flags & STYLE_FLAGS_STYLE_BIT) != 0u; - if is_stroke { - // TODO: FIX - if (tag_byte & PATH_TAG_SUBPATH_END_BIT) == 0u { - // TODO: WIP - let linewidth = bitcast(scene[config.style_base + tm.style_ix + 1u]); - // See https://www.iquilezles.org/www/articles/ellipses/ellipses.htm - // This is the correct bounding box, but we're not handling rendering - // in the isotropic case, so it may mismatch. - stroke = 0.5 * linewidth * vec2(length(transform.mat.xz), length(transform.mat.yw)); - bbox += vec4(-stroke, stroke); - - flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke))); - - // TODO: proper caps - let n0 = normalize(cubic_start_normal(p0, p1, p2, p3)) * stroke; - let n1 = normalize(cubic_end_normal(p0, p1, p2, p3)) * stroke; + let transform = read_transform(config.transform_base, trans_ix); + let pts = read_path_segment(tag, transform, is_stroke); + var bbox = vec4(min(pts.p0, pts.p1), max(pts.p0, pts.p1)); + bbox = vec4(min(bbox.xy, pts.p2), max(bbox.zw, pts.p2)); + bbox = vec4(min(bbox.xy, pts.p3), max(bbox.zw, pts.p3)); - let line_ix = atomicAdd(&bump.lines, 2u); - lines[line_ix] = LineSoup(tm.path_ix, p0 - n0, p0 + n0); - lines[line_ix + 1u] = LineSoup(tm.path_ix, p3 + n1, p3 - n1); + var stroke = vec2(0.0, 0.0); + if is_stroke { + let linewidth = bitcast(scene[config.style_base + style_ix + 1u]); + // See https://www.iquilezles.org/www/articles/ellipses/ellipses.htm + // This is the correct bounding box, but we're not handling rendering + // in the isotropic case, so it may mismatch. 
+ stroke = 0.5 * linewidth * vec2(length(transform.mat.xz), length(transform.mat.yw)); + bbox += vec4(-stroke, stroke); + let is_open = (tag.tag_byte & PATH_TAG_SEG_TYPE) != PATH_TAG_LINETO; + let is_stroke_cap_marker = (tag.tag_byte & PATH_TAG_SUBPATH_END_BIT) != 0u; + if is_stroke_cap_marker { + if is_open { + let tangent = normalize(pts.p1 - pts.p0); + let n = vec2f(-tangent.y, tangent.x) * stroke; + + // Draw start cap + let line_ix = atomicAdd(&bump.lines, 1u); + lines[line_ix] = LineSoup(path_ix, pts.p0 - n, pts.p0 + n); + } else { + // Don't draw anything if the path is closed. + } + bbox = vec4(1., 1., -1., -1.); + } else { + // Render offset curves + flatten_cubic(Cubic(pts.p0, pts.p1, pts.p2, pts.p3, stroke, path_ix, u32(is_stroke))); + + // Read the neighboring segment. + let neighbor = read_neighboring_segment(ix + 1u); + let n = normalize(cubic_end_normal(pts.p0, pts.p1, pts.p2, pts.p3)) * stroke; + if neighbor.do_join { + // Draw join. + let nn = vec2(-neighbor.tangent.y, neighbor.tangent.x) * stroke; + let line_ix = atomicAdd(&bump.lines, 2u); + lines[line_ix] = LineSoup(path_ix, pts.p3 + n, neighbor.p0 + nn); + lines[line_ix + 1u] = LineSoup(path_ix, neighbor.p0 - nn, pts.p3 - n); + } else { + // Draw end cap. + let line_ix = atomicAdd(&bump.lines, 1u); + lines[line_ix] = LineSoup(path_ix, pts.p3 + n, pts.p3 - n); + } } } else { - flatten_cubic(Cubic(p0, p1, p2, p3, stroke, tm.path_ix, u32(is_stroke))); + flatten_cubic(Cubic(pts.p0, pts.p1, pts.p2, pts.p3, stroke, path_ix, u32(is_stroke))); } // Update bounding box using atomics only. Computing a monoid is a // potential future optimization. From 13810b85d81681f30ad8e37f07a3e5448179516b Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Wed, 8 Nov 2023 01:02:13 -0800 Subject: [PATCH 05/12] [encoding] Eliminate zero-length start segments Make sure that a path start segment has a tangent that has a non-zero length. 
--- crates/encoding/src/path.rs | 41 +++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/crates/encoding/src/path.rs b/crates/encoding/src/path.rs index 0d943dcde..d35edc3e1 100644 --- a/crates/encoding/src/path.rs +++ b/crates/encoding/src/path.rs @@ -523,7 +523,13 @@ impl<'a> PathEncoder<'a> { self.move_to(self.first_point[0], self.first_point[1]); } if self.state == PathState::MoveTo { - // TODO: Drop the segment if its length is zero + let x0 = self.first_point[0]; + let y0 = self.first_point[1]; + // TODO: should this be an exact match? + if (x - x0).abs() < 1e-12 && (y - y0).abs() < 1e-12 { + // Drop the segment if its length is zero + return; + } self.first_start_tangent_end = [x, y]; } let buf = [x, y]; @@ -544,9 +550,18 @@ impl<'a> PathEncoder<'a> { self.move_to(self.first_point[0], self.first_point[1]); } if self.state == PathState::MoveTo { - // TODO: Drop the segment if its length is zero - // TODO: Pick (x2, y2) if [(x0, y0), (x1, y1)] has a length of zero - self.first_start_tangent_end = [x1, y1]; + let x0 = self.first_point[0]; + let y0 = self.first_point[1]; + // TODO clean this up + let (x, y) = if (x1 - x0).abs() > 1e-12 || (y1 - y0).abs() > 1e-12 { + (x1, y1) + } else if (x2 - x0).abs() > 1e-12 || (y2 - y0).abs() > 1e-12 { + (x2, y2) + } else { + // Drop the segment if its length is zero + return; + }; + self.first_start_tangent_end = [x, y]; } let buf = [x1, y1, x2, y2]; let bytes = bytemuck::bytes_of(&buf); @@ -566,10 +581,20 @@ impl<'a> PathEncoder<'a> { self.move_to(self.first_point[0], self.first_point[1]); } if self.state == PathState::MoveTo { - // TODO: Drop the segment if its length is zero - // TODO: Pick (x2, y2) if [(x0, y0), (x1, y1)] has a length of zero - // Pick (x3, y3) if [(x0, y0), (x2, y2)] has a length of zero - self.first_start_tangent_end = [x1, y1]; + let x0 = self.first_point[0]; + let y0 = self.first_point[1]; + // TODO clean this up + let (x, y) = if (x1 - x0).abs() > 
1e-12 || (y1 - y0).abs() > 1e-12 { + (x1, y1) + } else if (x2 - x0).abs() > 1e-12 || (y2 - y0).abs() > 1e-12 { + (x2, y2) + } else if (x3 - x0).abs() > 1e-12 || (y3 - y0).abs() > 1e-12 { + (x3, y3) + } else { + // Drop the segment if its length is zero + return; + }; + self.first_start_tangent_end = [x, y]; } let buf = [x1, y1, x2, y2, x3, y3]; let bytes = bytemuck::bytes_of(&buf); From deba7c80a196b06901a098387227a4037fc0cd73 Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Wed, 8 Nov 2023 08:14:08 -0800 Subject: [PATCH 06/12] Update closed stroke tests - Add versions of the cubics without co-incident start and end points - Reposition the closed paths so they fit within the view --- examples/scenes/src/test_scenes.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/examples/scenes/src/test_scenes.rs b/examples/scenes/src/test_scenes.rs index 14597d774..8705f626d 100644 --- a/examples/scenes/src/test_scenes.rs +++ b/examples/scenes/src/test_scenes.rs @@ -125,6 +125,9 @@ fn stroke_styles(sb: &mut SceneBuilder, params: &mut SceneParams) { MoveTo((200., 0.).into()), CurveTo((100., 42.).into(), (300., 42.).into(), (200., 0.).into()), ClosePath, + MoveTo((290., 0.).into()), + CurveTo((200., 42.).into(), (400., 42.).into(), (310., 0.).into()), + ClosePath, ]; let cap_styles = [Cap::Butt, Cap::Square, Cap::Round]; let join_styles = [Join::Bevel, Join::Miter, Join::Round]; @@ -208,8 +211,6 @@ fn stroke_styles(sb: &mut SceneBuilder, params: &mut SceneParams) { } // Closed paths - let t = Affine::translate((500., 0.)) * t; - y = 0.; for (i, join) in join_styles.iter().enumerate() { params.text.add( sb, @@ -221,7 +222,10 @@ fn stroke_styles(sb: &mut SceneBuilder, params: &mut SceneParams) { ); // The cap style is not important since a closed path shouldn't have any caps. sb.stroke( - &Stroke::new(10.).with_caps(cap_styles[i]).with_join(*join), + &Stroke::new(10.) 
+ .with_caps(cap_styles[i]) + .with_join(*join) + .with_miter_limit(5.), Affine::translate((0., y + 30.)) * t, colors[color_idx], None, From 83359c513d83f0be64657a5509a4ba3b1b65b077 Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Wed, 8 Nov 2023 08:21:39 -0800 Subject: [PATCH 07/12] Minor cleanup in flatten.wgsl --- shader/flatten.wgsl | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/shader/flatten.wgsl b/shader/flatten.wgsl index dcd656990..13f53f18f 100644 --- a/shader/flatten.wgsl +++ b/shader/flatten.wgsl @@ -89,18 +89,6 @@ fn eval_cubic(p0: vec2, p1: vec2, p2: vec2, p3: vec2, t: f32 return p0 * (mt * mt * mt) + (p1 * (mt * mt * 3.0) + (p2 * (mt * 3.0) + p3 * t) * t) * t; } -fn eval_cubic_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2, t: f32) -> vec2 { - let dp0 = 3. * (p1 - p0); - let dp1 = 3. * (p2 - p1); - let dp2 = 3. * (p3 - p2); - return eval_quad(dp0, dp1, dp2, t); -} - -fn eval_cubic_normal(p0: vec2, p1: vec2, p2: vec2, p3: vec2, t: f32) -> vec2 { - let tangent = eval_cubic_tangent(p0, p1, p2, p3, t); - return vec2(-tangent.y, tangent.x); -} - fn eval_quad_tangent(p0: vec2, p1: vec2, p2: vec2, t: f32) -> vec2 { let dp0 = 2. * (p1 - p0); let dp1 = 2. * (p2 - p1); @@ -210,6 +198,10 @@ fn flatten_cubic(cubic: Cubic) { lp1 = eval_quad(qp0, qp1, qp2, t); } + // TODO: Instead of outputting two offset segments here, restructure this function as + // "flatten_cubic_at_offset" such that it outputs one cubic at an offset. That should + // more closely resemble the end state of this shader which will work like a state + // machine. if cubic.flags == 1u { var n1: vec2f; if all(lp1 == p3) { @@ -437,6 +429,8 @@ fn main( } else { // Don't draw anything if the path is closed. } + // The stroke cap marker does not contribute to the path's bounding box. The stroke + // width is accounted for when computing the bbox for regular segments. 
bbox = vec4(1., 1., -1., -1.); } else { // Render offset curves From 1f5a93d22d75067ce96d18668a5c73b84d5098a1 Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Wed, 8 Nov 2023 08:22:29 -0800 Subject: [PATCH 08/12] Keep GPU stroking off by default --- src/scene.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/scene.rs b/src/scene.rs index 0e42ccf1f..80f876792 100644 --- a/src/scene.rs +++ b/src/scene.rs @@ -149,7 +149,7 @@ impl<'a> SceneBuilder<'a> { brush_transform: Option, shape: &impl Shape, ) { - const GPU_STROKES: bool = true; + const GPU_STROKES: bool = false; if GPU_STROKES { // TODO: handle dashing by using a DashIterator self.scene From 24681fa94ef840dcc6c0793f42bbaa8d3be013c9 Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Wed, 8 Nov 2023 08:46:36 -0800 Subject: [PATCH 09/12] Minor documentation and cleanup to zero-length tangent detection --- crates/encoding/src/path.rs | 24 +++++++++++++++--------- shader/flatten.wgsl | 8 +++++++- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/crates/encoding/src/path.rs b/crates/encoding/src/path.rs index d35edc3e1..6118ff414 100644 --- a/crates/encoding/src/path.rs +++ b/crates/encoding/src/path.rs @@ -525,9 +525,11 @@ impl<'a> PathEncoder<'a> { if self.state == PathState::MoveTo { let x0 = self.first_point[0]; let y0 = self.first_point[1]; - // TODO: should this be an exact match? - if (x - x0).abs() < 1e-12 && (y - y0).abs() < 1e-12 { + // Ensure that we don't end up with a zero-length start tangent. + const EPS: f32 = 1e-12; + if (x - x0).abs() < EPS && (y - y0).abs() < EPS { // Drop the segment if its length is zero + // TODO: do this for all not segments, not just start? 
return; } self.first_start_tangent_end = [x, y]; @@ -552,13 +554,15 @@ impl<'a> PathEncoder<'a> { if self.state == PathState::MoveTo { let x0 = self.first_point[0]; let y0 = self.first_point[1]; - // TODO clean this up - let (x, y) = if (x1 - x0).abs() > 1e-12 || (y1 - y0).abs() > 1e-12 { + // Ensure that we don't end up with a zero-length start tangent. + const EPS: f32 = 1e-12; + let (x, y) = if (x1 - x0).abs() > EPS || (y1 - y0).abs() > EPS { (x1, y1) - } else if (x2 - x0).abs() > 1e-12 || (y2 - y0).abs() > 1e-12 { + } else if (x2 - x0).abs() > EPS || (y2 - y0).abs() > EPS { (x2, y2) } else { // Drop the segment if its length is zero + // TODO: do this for all not segments, not just start? return; }; self.first_start_tangent_end = [x, y]; @@ -583,15 +587,17 @@ impl<'a> PathEncoder<'a> { if self.state == PathState::MoveTo { let x0 = self.first_point[0]; let y0 = self.first_point[1]; - // TODO clean this up - let (x, y) = if (x1 - x0).abs() > 1e-12 || (y1 - y0).abs() > 1e-12 { + // Ensure that we don't end up with a zero-length start tangent. + const EPS: f32 = 1e-12; + let (x, y) = if (x1 - x0).abs() > EPS || (y1 - y0).abs() > EPS { (x1, y1) - } else if (x2 - x0).abs() > 1e-12 || (y2 - y0).abs() > 1e-12 { + } else if (x2 - x0).abs() > EPS || (y2 - y0).abs() > EPS { (x2, y2) - } else if (x3 - x0).abs() > 1e-12 || (y3 - y0).abs() > 1e-12 { + } else if (x3 - x0).abs() > EPS || (y3 - y0).abs() > EPS { (x3, y3) } else { // Drop the segment if its length is zero + // TODO: do this for all not segments, not just start? 
return; }; self.first_start_tangent_end = [x, y]; diff --git a/shader/flatten.wgsl b/shader/flatten.wgsl index 13f53f18f..43e1afa53 100644 --- a/shader/flatten.wgsl +++ b/shader/flatten.wgsl @@ -328,7 +328,13 @@ fn read_path_segment(tag: PathTagData, transform: Transform, is_stroke: bool) -> } if is_stroke_cap_marker && is_open { - // TODO: document + // The stroke cap marker for an open path is encoded as a quadto where the p1 and p2 store + // the start control point of the subpath and together with p2 forms the start tangent. p0 + // is ignored. + // + // This is encoded this way because encoding this as a lineto would require adding a moveto, + // which would terminate the subpath too early (by setting the SUBPATH_END_BIT on the + // segment preceding the cap marker). This scheme is only used for strokes. p0 = transform_apply(transform, p1); p1 = transform_apply(transform, p2); seg_type = PATH_TAG_LINETO; From 3df413a03775d37e643ee366a75638e0a97fbc10 Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Thu, 9 Nov 2023 21:45:38 -0800 Subject: [PATCH 10/12] [flatten] Settle on a consistent vector normalization scheme Make it so that tangent vectors are NOT normalized and normals are normalized. Vector helpers that derive normals from tangents now always return normalized vectors. This keeps things internally consistent. 
--- shader/flatten.wgsl | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/shader/flatten.wgsl b/shader/flatten.wgsl index 43e1afa53..6c0fe674b 100644 --- a/shader/flatten.wgsl +++ b/shader/flatten.wgsl @@ -96,7 +96,7 @@ fn eval_quad_tangent(p0: vec2, p1: vec2, p2: vec2, t: f32) -> vec } fn eval_quad_normal(p0: vec2, p1: vec2, p2: vec2, t: f32) -> vec2 { - let tangent = eval_quad_tangent(p0, p1, p2, t); + let tangent = normalize(eval_quad_tangent(p0, p1, p2, t)); return vec2(-tangent.y, tangent.x); } @@ -117,12 +117,12 @@ fn cubic_end_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2) } fn cubic_start_normal(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { - let tangent = cubic_start_tangent(p0, p1, p2, p3); + let tangent = normalize(cubic_start_tangent(p0, p1, p2, p3)); return vec2(-tangent.y, tangent.x); } fn cubic_end_normal(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { - let tangent = cubic_end_tangent(p0, p1, p2, p3); + let tangent = normalize(cubic_end_tangent(p0, p1, p2, p3)); return vec2(-tangent.y, tangent.x); } @@ -163,8 +163,7 @@ fn flatten_cubic(cubic: Cubic) { } // HACK: normal vector used to offset line segments for shitty stroke handling. 
- var n0 = cubic_start_normal(p0, p1, p2, p3); - n0 = normalize(n0) * cubic.stroke; + var n0 = cubic_start_normal(p0, p1, p2, p3) * cubic.stroke; let n = max(u32(ceil(val * (0.5 / sqrt(REM_ACCURACY)))), 1u); var lp0 = p0; @@ -177,7 +176,6 @@ fn flatten_cubic(cubic: Cubic) { let qp2 = eval_cubic(p0, p1, p2, p3, t); var qp1 = eval_cubic(p0, p1, p2, p3, t - 0.5 * step); qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2); - let qp0_normal = eval_quad_normal(qp0, qp1, qp2, 0.); let params = keep_params[i]; let u0 = approx_parabola_inv_integral(params.a0); let u2 = approx_parabola_inv_integral(params.a2); @@ -209,7 +207,7 @@ fn flatten_cubic(cubic: Cubic) { } else { n1 = eval_quad_normal(qp0, qp1, qp2, t1); } - n1 = normalize(n1) * cubic.stroke; + n1 *= cubic.stroke; let line_ix = atomicAdd(&bump.lines, 2u); lines[line_ix] = LineSoup(cubic.path_ix, lp0 + n0, lp1 + n1); lines[line_ix + 1u] = LineSoup(cubic.path_ix, lp1 - n1, lp0 - n0); @@ -366,7 +364,7 @@ struct NeighboringSegment { do_join: bool, p0: vec2f, - // Normalized device-space start tangent vector + // Device-space start tangent vector tangent: vec2f, } @@ -380,7 +378,7 @@ fn read_neighboring_segment(ix: u32) -> NeighboringSegment { let do_join = !is_stroke_cap_marker || is_closed; let p0 = pts.p0; let tangent = cubic_start_tangent(pts.p0, pts.p1, pts.p2, pts.p3); - return NeighboringSegment(do_join, p0, normalize(tangent)); + return NeighboringSegment(do_join, p0, tangent); } @compute @workgroup_size(256) @@ -426,8 +424,7 @@ fn main( let is_stroke_cap_marker = (tag.tag_byte & PATH_TAG_SUBPATH_END_BIT) != 0u; if is_stroke_cap_marker { if is_open { - let tangent = normalize(pts.p1 - pts.p0); - let n = vec2f(-tangent.y, tangent.x) * stroke; + let n = cubic_start_normal(pts.p0, pts.p1, pts.p2, pts.p3) * stroke; // Draw start cap let line_ix = atomicAdd(&bump.lines, 1u); @@ -447,7 +444,7 @@ fn main( let n = normalize(cubic_end_normal(pts.p0, pts.p1, pts.p2, pts.p3)) * stroke; if neighbor.do_join { // Draw join. 
- let nn = vec2(-neighbor.tangent.y, neighbor.tangent.x) * stroke; + let nn = normalize(vec2(-neighbor.tangent.y, neighbor.tangent.x)) * stroke; let line_ix = atomicAdd(&bump.lines, 2u); lines[line_ix] = LineSoup(path_ix, pts.p3 + n, neighbor.p0 + nn); lines[line_ix + 1u] = LineSoup(path_ix, neighbor.p0 - nn, pts.p3 - n); From eded0a1dcd99a9e1a4421992237fe14a1e27f957 Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Mon, 13 Nov 2023 13:57:36 -0800 Subject: [PATCH 11/12] Address review comments --- crates/encoding/src/path.rs | 76 ++++++++++++++++++++++--------------- shader/flatten.wgsl | 14 +++---- shader/shared/pathtag.wgsl | 6 +-- 3 files changed, 56 insertions(+), 40 deletions(-) diff --git a/crates/encoding/src/path.rs b/crates/encoding/src/path.rs index 6118ff414..ed9cd99e5 100644 --- a/crates/encoding/src/path.rs +++ b/crates/encoding/src/path.rs @@ -444,10 +444,10 @@ impl<'a> PathEncoder<'a> { /// segment tells the GPU stroker whether to draw a cap or a join based on the topology of the /// path: /// - /// 1. This marker segment is encoded as a `quad-to` for an open path and a `line-to` for a - /// closed path. An open path gets drawn with a start and end cap. A closed path gets drawn - /// with a single join in place of the caps where the subpath's start and end control points - /// meet. + /// 1. This marker segment is encoded as a `quad-to` (2 additional points) for an open path and + /// a `line-to` (1 additional point) for a closed path. An open path gets drawn with a start + /// and end cap. A closed path gets drawn with a single join in place of the caps where the + /// subpath's start and end control points meet. /// /// 2. The marker segment tells the GPU flattening stage how to render caps and joins while /// processing each path segment in parallel. 
All subpaths end with the marker segment which @@ -523,15 +523,13 @@ impl<'a> PathEncoder<'a> { self.move_to(self.first_point[0], self.first_point[1]); } if self.state == PathState::MoveTo { - let x0 = self.first_point[0]; - let y0 = self.first_point[1]; + let p0 = (self.first_point[0], self.first_point[1]); // Ensure that we don't end up with a zero-length start tangent. - const EPS: f32 = 1e-12; - if (x - x0).abs() < EPS && (y - y0).abs() < EPS { + let Some((x, y)) = start_tangent_for_curve(p0, (x, y), p0, p0) else { // Drop the segment if its length is zero - // TODO: do this for all not segments, not just start? + // TODO: do this for all not segments, not just start. return; - } + }; self.first_start_tangent_end = [x, y]; } let buf = [x, y]; @@ -552,17 +550,11 @@ impl<'a> PathEncoder<'a> { self.move_to(self.first_point[0], self.first_point[1]); } if self.state == PathState::MoveTo { - let x0 = self.first_point[0]; - let y0 = self.first_point[1]; + let p0 = (self.first_point[0], self.first_point[1]); // Ensure that we don't end up with a zero-length start tangent. - const EPS: f32 = 1e-12; - let (x, y) = if (x1 - x0).abs() > EPS || (y1 - y0).abs() > EPS { - (x1, y1) - } else if (x2 - x0).abs() > EPS || (y2 - y0).abs() > EPS { - (x2, y2) - } else { + let Some((x, y)) = start_tangent_for_curve(p0, (x1, y1), (x2, y2), p0) else { // Drop the segment if its length is zero - // TODO: do this for all not segments, not just start? + // TODO: do this for all not segments, not just start. return; }; self.first_start_tangent_end = [x, y]; @@ -585,19 +577,11 @@ impl<'a> PathEncoder<'a> { self.move_to(self.first_point[0], self.first_point[1]); } if self.state == PathState::MoveTo { - let x0 = self.first_point[0]; - let y0 = self.first_point[1]; + let p0 = (self.first_point[0], self.first_point[1]); // Ensure that we don't end up with a zero-length start tangent. 
- const EPS: f32 = 1e-12; - let (x, y) = if (x1 - x0).abs() > EPS || (y1 - y0).abs() > EPS { - (x1, y1) - } else if (x2 - x0).abs() > EPS || (y2 - y0).abs() > EPS { - (x2, y2) - } else if (x3 - x0).abs() > EPS || (y3 - y0).abs() > EPS { - (x3, y3) - } else { + let Some((x, y)) = start_tangent_for_curve(p0, (x1, y1), (x2, y2), (x3, y3)) else { // Drop the segment if its length is zero - // TODO: do this for all not segments, not just start? + // TODO: do this for all not segments, not just start. return; }; self.first_start_tangent_end = [x, y]; @@ -739,6 +723,38 @@ impl fello::scale::Pen for PathEncoder<'_> { } } +// Returns the end point of the start tangent of a curve starting at `(x0, y0)`, or `None` if the +// curve is degenerate / has zero-length. The inputs are a sequence of control points that can +// represent a line, a quadratic Bezier, or a cubic Bezier. Lines and quadratic Beziers can be +// passed to this function by simply setting the invalid control point degrees equal to `(x0, y0)`. 
+fn start_tangent_for_curve( + p0: (f32, f32), + p1: (f32, f32), + p2: (f32, f32), + p3: (f32, f32), +) -> Option<(f32, f32)> { + debug_assert!(!p0.0.is_nan()); + debug_assert!(!p0.1.is_nan()); + debug_assert!(!p1.0.is_nan()); + debug_assert!(!p1.1.is_nan()); + debug_assert!(!p2.0.is_nan()); + debug_assert!(!p2.1.is_nan()); + debug_assert!(!p3.0.is_nan()); + debug_assert!(!p3.1.is_nan()); + + const EPS: f32 = 1e-12; + let pt = if (p1.0 - p0.0).abs() > EPS || (p1.1 - p0.1).abs() > EPS { + p1 + } else if (p2.0 - p0.0).abs() > EPS || (p2.1 - p0.1).abs() > EPS { + p2 + } else if (p3.0 - p0.0).abs() > EPS || (p3.1 - p0.1).abs() > EPS { + p3 + } else { + return None; + }; + Some(pt) +} + #[cfg(test)] mod tests { use super::*; diff --git a/shader/flatten.wgsl b/shader/flatten.wgsl index 6c0fe674b..d22d89e35 100644 --- a/shader/flatten.wgsl +++ b/shader/flatten.wgsl @@ -153,7 +153,7 @@ fn flatten_cubic(cubic: Cubic) { // HACK: this increase subdivision count as function of the stroke width for shitty strokes. var tol = sqrt(REM_ACCURACY); - if cubic.flags == 1u { + if cubic.flags == CUBIC_IS_STROKE { tol *= min(1000., dot(cubic.stroke, cubic.stroke)); } let params = estimate_subdiv(qp0, qp1, qp2, tol); @@ -302,7 +302,7 @@ fn read_path_segment(tag: PathTagData, transform: Transform, is_stroke: bool) -> var seg_type = tag.tag_byte & PATH_TAG_SEG_TYPE; let pathseg_offset = tag.monoid.pathseg_offset; - let is_stroke_cap_marker = is_stroke && (tag.tag_byte & PATH_TAG_SUBPATH_END_BIT) != 0u; + let is_stroke_cap_marker = is_stroke && (tag.tag_byte & PATH_TAG_SUBPATH_END) != 0u; let is_open = seg_type == PATH_TAG_QUADTO; if (tag.tag_byte & PATH_TAG_F32) != 0u { @@ -331,7 +331,7 @@ fn read_path_segment(tag: PathTagData, transform: Transform, is_stroke: bool) -> // is ignored. 
// // This is encoded this way because encoding this as a lineto would require adding a moveto, - // which would terminate the subpath too early (by setting the SUBPATH_END_BIT on the + // which would terminate the subpath too early (by setting the SUBPATH_END on the // segment preceding the cap marker). This scheme is only used for strokes. p0 = transform_apply(transform, p1); p1 = transform_apply(transform, p2); @@ -374,7 +374,7 @@ fn read_neighboring_segment(ix: u32) -> NeighboringSegment { let pts = read_path_segment(tag, transform, true); let is_closed = (tag.tag_byte & PATH_TAG_SEG_TYPE) == PATH_TAG_LINETO; - let is_stroke_cap_marker = (tag.tag_byte & PATH_TAG_SUBPATH_END_BIT) != 0u; + let is_stroke_cap_marker = (tag.tag_byte & PATH_TAG_SUBPATH_END) != 0u; let do_join = !is_stroke_cap_marker || is_closed; let p0 = pts.p0; let tangent = cubic_start_tangent(pts.p0, pts.p1, pts.p2, pts.p3); @@ -397,7 +397,7 @@ fn main( let out = &path_bboxes[path_ix]; let style_flags = scene[config.style_base + style_ix]; // The fill bit is always set to 0 for strokes which represents a non-zero fill. 
- let draw_flags = select(DRAW_INFO_FLAGS_FILL_RULE_BIT, 0u, (style_flags & STYLE_FLAGS_FILL_BIT) == 0u); + let draw_flags = select(DRAW_INFO_FLAGS_FILL_RULE_BIT, 0u, (style_flags & STYLE_FLAGS_FILL) == 0u); if (tag.tag_byte & PATH_TAG_PATH) != 0u { (*out).draw_flags = draw_flags; (*out).trans_ix = trans_ix; @@ -405,7 +405,7 @@ fn main( // Decode path data let seg_type = tag.tag_byte & PATH_TAG_SEG_TYPE; if seg_type != 0u { - let is_stroke = (style_flags & STYLE_FLAGS_STYLE_BIT) != 0u; + let is_stroke = (style_flags & STYLE_FLAGS_STYLE) != 0u; let transform = read_transform(config.transform_base, trans_ix); let pts = read_path_segment(tag, transform, is_stroke); var bbox = vec4(min(pts.p0, pts.p1), max(pts.p0, pts.p1)); @@ -421,7 +421,7 @@ fn main( stroke = 0.5 * linewidth * vec2(length(transform.mat.xz), length(transform.mat.yw)); bbox += vec4(-stroke, stroke); let is_open = (tag.tag_byte & PATH_TAG_SEG_TYPE) != PATH_TAG_LINETO; - let is_stroke_cap_marker = (tag.tag_byte & PATH_TAG_SUBPATH_END_BIT) != 0u; + let is_stroke_cap_marker = (tag.tag_byte & PATH_TAG_SUBPATH_END) != 0u; if is_stroke_cap_marker { if is_open { let n = cubic_start_normal(pts.p0, pts.p1, pts.p2, pts.p3) * stroke; diff --git a/shader/shared/pathtag.wgsl b/shader/shared/pathtag.wgsl index 37f0235ed..86f2565ae 100644 --- a/shader/shared/pathtag.wgsl +++ b/shader/shared/pathtag.wgsl @@ -21,12 +21,12 @@ let PATH_TAG_TRANSFORM = 0x20u; let PATH_TAG_PATH = 0x10u; let PATH_TAG_STYLE = 0x40u; #endif -let PATH_TAG_SUBPATH_END_BIT = 4u; +let PATH_TAG_SUBPATH_END = 4u; // Size of the `Style` data structure in words let STYLE_SIZE_IN_WORDS: u32 = 2u; -let STYLE_FLAGS_STYLE_BIT: u32 = 0x80000000u; -let STYLE_FLAGS_FILL_BIT: u32 = 0x40000000u; +let STYLE_FLAGS_STYLE: u32 = 0x80000000u; +let STYLE_FLAGS_FILL: u32 = 0x40000000u; // TODO: Declare the remaining STYLE flags here. 
From 7a122b64527a1a7081de968ee41c95efef6b411c Mon Sep 17 00:00:00 2001 From: Arman Uguray Date: Mon, 13 Nov 2023 15:39:07 -0800 Subject: [PATCH 12/12] Style and comment fixes in the WGSL * Fix the phrasing in a comment regarding subdivision count * Use the vec4f and vec2f predeclared aliases for vec4 and vec2 in `flatten`. I've been consistent with this style for new code in this shader and I may make the other shaders use the aliases in a follow up since it's more concise. --- shader/flatten.wgsl | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/shader/flatten.wgsl b/shader/flatten.wgsl index d22d89e35..d15dcf8c4 100644 --- a/shader/flatten.wgsl +++ b/shader/flatten.wgsl @@ -52,7 +52,7 @@ fn approx_parabola_inv_integral(x: f32) -> f32 { return x * sqrt(1.0 - B + (B * B + 0.5 * x * x)); } -fn estimate_subdiv(p0: vec2, p1: vec2, p2: vec2, sqrt_tol: f32) -> SubdivResult { +fn estimate_subdiv(p0: vec2f, p1: vec2f, p2: vec2f, sqrt_tol: f32) -> SubdivResult { let d01 = p1 - p0; let d12 = p2 - p1; let dd = d01 - d12; @@ -79,28 +79,28 @@ fn estimate_subdiv(p0: vec2, p1: vec2, p2: vec2, sqrt_tol: f32) - return SubdivResult(val, a0, a2); } -fn eval_quad(p0: vec2, p1: vec2, p2: vec2, t: f32) -> vec2 { +fn eval_quad(p0: vec2f, p1: vec2f, p2: vec2f, t: f32) -> vec2f { let mt = 1.0 - t; return p0 * (mt * mt) + (p1 * (mt * 2.0) + p2 * t) * t; } -fn eval_cubic(p0: vec2, p1: vec2, p2: vec2, p3: vec2, t: f32) -> vec2 { +fn eval_cubic(p0: vec2f, p1: vec2f, p2: vec2f, p3: vec2f, t: f32) -> vec2f { let mt = 1.0 - t; return p0 * (mt * mt * mt) + (p1 * (mt * mt * 3.0) + (p2 * (mt * 3.0) + p3 * t) * t) * t; } -fn eval_quad_tangent(p0: vec2, p1: vec2, p2: vec2, t: f32) -> vec2 { +fn eval_quad_tangent(p0: vec2f, p1: vec2f, p2: vec2f, t: f32) -> vec2f { let dp0 = 2. * (p1 - p0); let dp1 = 2.
* (p2 - p1); return mix(dp0, dp1, t); } -fn eval_quad_normal(p0: vec2, p1: vec2, p2: vec2, t: f32) -> vec2 { +fn eval_quad_normal(p0: vec2f, p1: vec2f, p2: vec2f, t: f32) -> vec2f { let tangent = normalize(eval_quad_tangent(p0, p1, p2, t)); return vec2(-tangent.y, tangent.x); } -fn cubic_start_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { +fn cubic_start_tangent(p0: vec2f, p1: vec2f, p2: vec2f, p3: vec2f) -> vec2f { let EPS = 1e-12; let d01 = p1 - p0; let d02 = p2 - p0; @@ -108,7 +108,7 @@ fn cubic_start_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2 EPS), d01, dot(d01, d01) > EPS); } -fn cubic_end_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { +fn cubic_end_tangent(p0: vec2f, p1: vec2f, p2: vec2f, p3: vec2f) -> vec2f { let EPS = 1e-12; let d23 = p3 - p2; let d13 = p3 - p1; @@ -116,12 +116,12 @@ fn cubic_end_tangent(p0: vec2, p1: vec2, p2: vec2, p3: vec2) return select(select(d03, d13, dot(d13, d13) > EPS), d23, dot(d23, d23) > EPS); } -fn cubic_start_normal(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { +fn cubic_start_normal(p0: vec2f, p1: vec2f, p2: vec2f, p3: vec2f) -> vec2f { let tangent = normalize(cubic_start_tangent(p0, p1, p2, p3)); return vec2(-tangent.y, tangent.x); } -fn cubic_end_normal(p0: vec2, p1: vec2, p2: vec2, p3: vec2) -> vec2 { +fn cubic_end_normal(p0: vec2f, p1: vec2f, p2: vec2f, p3: vec2f) -> vec2f { let tangent = normalize(cubic_end_tangent(p0, p1, p2, p3)); return vec2(-tangent.y, tangent.x); } @@ -151,7 +151,8 @@ fn flatten_cubic(cubic: Cubic) { var qp1 = eval_cubic(p0, p1, p2, p3, t - 0.5 * step); qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2); - // HACK: this increase subdivision count as function of the stroke width for shitty strokes. + // HACK: this increases subdivision count as a function of the stroke width for shitty + // strokes. This isn't systematic or correct and shouldn't be relied on in the long term. 
var tol = sqrt(REM_ACCURACY); if cubic.flags == CUBIC_IS_STROKE { tol *= min(1000., dot(cubic.stroke, cubic.stroke)); @@ -182,7 +183,7 @@ fn flatten_cubic(cubic: Cubic) { let uscale = 1.0 / (u2 - u0); var val_target = f32(n_out) * v_step; while n_out == n || val_target < val_sum + params.val { - var lp1: vec2; + var lp1: vec2f; var t1: f32; if n_out == n { lp1 = p3; @@ -229,13 +230,13 @@ fn flatten_cubic(cubic: Cubic) { var pathdata_base: u32; -fn read_f32_point(ix: u32) -> vec2 { +fn read_f32_point(ix: u32) -> vec2f { let x = bitcast(scene[pathdata_base + ix]); let y = bitcast(scene[pathdata_base + ix + 1u]); return vec2(x, y); } -fn read_i16_point(ix: u32) -> vec2 { +fn read_i16_point(ix: u32) -> vec2f { let raw = scene[pathdata_base + ix]; let x = f32(i32(raw << 16u) >> 16u); let y = f32(i32(raw) >> 16u); @@ -243,8 +244,8 @@ fn read_i16_point(ix: u32) -> vec2 { } struct Transform { - mat: vec4, - translate: vec2, + mat: vec4f, + translate: vec2f, } fn read_transform(transform_base: u32, ix: u32) -> Transform { @@ -260,7 +261,7 @@ fn read_transform(transform_base: u32, ix: u32) -> Transform { return Transform(mat, translate); } -fn transform_apply(transform: Transform, p: vec2) -> vec2 { +fn transform_apply(transform: Transform, p: vec2f) -> vec2f { return transform.mat.xy * p.x + transform.mat.zw * p.y + transform.translate; } @@ -295,10 +296,10 @@ struct CubicPoints { } fn read_path_segment(tag: PathTagData, transform: Transform, is_stroke: bool) -> CubicPoints { - var p0: vec2; - var p1: vec2; - var p2: vec2; - var p3: vec2; + var p0: vec2f; + var p1: vec2f; + var p2: vec2f; + var p3: vec2f; var seg_type = tag.tag_byte & PATH_TAG_SEG_TYPE; let pathseg_offset = tag.monoid.pathseg_offset;