From ece27966bd4a820b825264b471765004c1a2b803 Mon Sep 17 00:00:00 2001
From: Jesse
Date: Thu, 25 Mar 2021 11:54:07 -0400
Subject: [PATCH] self-voicing -> voicing in many comments, see
 https://github.com/phetsims/tasks/issues/1083

---
 js/accessibility/speaker/VoicingHighlight.js  |  4 ++--
 .../speaker/VoicingInputListener.js           |  8 ++++----
 .../speaker/VoicingPreferencesDialog.js       | 18 +++++++++---------
 .../speaker/VoicingQuickControl.js            | 22 +++++++++++-----------
 .../speaker/VoicingWrapperNode.js             |  2 +-
 js/accessibility/speaker/levelSpeakerModel.js |  4 ++--
 .../speaker/speakerHighlighter.js             |  6 +++---
 7 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/js/accessibility/speaker/VoicingHighlight.js b/js/accessibility/speaker/VoicingHighlight.js
index 8e40f0b79..26932c8e9 100644
--- a/js/accessibility/speaker/VoicingHighlight.js
+++ b/js/accessibility/speaker/VoicingHighlight.js
@@ -1,13 +1,13 @@
 // Copyright 2020, University of Colorado Boulder
 
 /**
- * A focus highlight for the self-voicing prototype. Has a different color than the
+ * A focus highlight for the voicing prototype. Has a different color than the
  * default focus highlight and includes an icon to indicate that interacting with
  * the object will result in some speech. This highlight may also appear on
  * mouse over as well as focus in the SpeakerHighlighter
  *
  * This should generally be used for otherwise NON interactive things that
- * have self-voicing. Normally focusable things should have the default
+ * have voicing. Normally focusable things should have the default
  * focus highlight.
  * @author Jesse Greenberg
  */
diff --git a/js/accessibility/speaker/VoicingInputListener.js b/js/accessibility/speaker/VoicingInputListener.js
index 01dc3fe97..a1f203146 100644
--- a/js/accessibility/speaker/VoicingInputListener.js
+++ b/js/accessibility/speaker/VoicingInputListener.js
@@ -1,9 +1,9 @@
 // Copyright 2020, University of Colorado Boulder
 
 /**
- * A basic listener to assist with the self-voicing project. For a particular Node, you
+ * A basic listener to assist with the voicing project. For a particular Node, you
  * can specify what should happen on the various input methods that may trigger some
- * self-voicing content.
+ * voicing content.
  *
  * PROTOTYPE! Not to be used in production code!
  *
@@ -32,7 +32,7 @@
       // {function} - called when the Node receives the focus event
       onFocusIn: () => {},
 
-      // the Node represented for self-voicing - state of this Node will
+      // the Node represented for voicing - state of this Node will
       // control the output of speech. For instance, if the representedNode
       // is not visible in a display no speech will be generated on
       // various input. If null, no such checks will be made
@@ -44,7 +44,7 @@
       highlightTarget: null
     }, options );
 
-    assert && assert( phet.joist.sim.voicingUtteranceQueue, 'Listener requires the utteranceQueue for self-voicing, is the feature enabled?' );
+    assert && assert( phet.joist.sim.voicingUtteranceQueue, 'Listener requires the utteranceQueue for voicing, is the feature enabled?' );
 
     // @private - see options
     this.onPress = options.onPress;
diff --git a/js/accessibility/speaker/VoicingPreferencesDialog.js b/js/accessibility/speaker/VoicingPreferencesDialog.js
index cc6996079..aec3ef967 100644
--- a/js/accessibility/speaker/VoicingPreferencesDialog.js
+++ b/js/accessibility/speaker/VoicingPreferencesDialog.js
@@ -1,8 +1,8 @@
 // Copyright 2020, University of Colorado Boulder
 
 /**
- * Content for an "Options" dialog, only used if the ?selfVoicing query parameter is used to explore prototype
- * "self voicing" feature set. This dialog allows control of output verbosity and settings for the speech synthesizer.
+ * Content for an "Options" dialog, only used if the ?supportsVoicing query parameter is used to explore prototype
+ * "voicing" feature set. This dialog allows control of output verbosity and settings for the speech synthesizer.
  *
  * PROTOTYPE! Do not use in production code.
  *
@@ -41,7 +41,7 @@ const objectChangesString = 'Voice Object Changes & On-Screen Text';
 const contextChangesString = 'Voice Context Changes';
 const hintsString = 'Voice Helpful Hints';
 const interactiveHighlightsString = 'Show Interactive Highlights';
-const quickMenuString = 'Show Self Voicing Quick Menu';
+const quickMenuString = 'Show Voicing Quick Menu';
 const gestureControlString = 'Enable Gesture Control';
 const generalOptionsString = 'General Options';
 const outputString = 'Speech Output Levels';
@@ -53,8 +53,8 @@ const newVoiceRateString = 'New Voice Rate';
 const newVoicePitchString = 'New Voice Pitch';
 const highlightsShownString = 'Interactive Highlights Shown';
 const highlightsHiddenString = 'Interactive Highlights hidden';
-const menuShownString = 'Self Voicing Quick Menu Shown';
-const menuHiddenString = 'Self Voicing Quick Menu Hidden';
+const menuShownString = 'Voicing Quick Menu Shown';
+const menuHiddenString = 'Voicing Quick Menu Hidden';
 const gestureControlsEnabledString = 'Custom gesture controls enabled.';
 const gestureControlsDisabledString = 'Custom gesture controls disabled.';
 const speakingObjectChangesString = 'Speaking object changes and on screen text.';
@@ -90,7 +90,7 @@ class VoicingPreferencesDialog extends Dialog {
 
     // Hack alert - The listBox is private but we need to be able to speak the selected voice.
     // This is just a proof of concept so I would rather hack this here than put in ComboBox/
-    // ComboBoxListItemNode. If self-voicing ends up being a long term feature we will move this
+    // ComboBoxListItemNode. If voicing ends up being a long term feature we will move this
     // kind of thing to those types with more consideration.
     voiceComboBox.listBox.addInputListener( new VoicingInputListener( {
       onFocusIn: event => {
@@ -144,10 +144,10 @@ class VoicingPreferencesDialog extends Dialog {
 
 // @private
 // @static
-VoicingPreferencesDialog.createLabelledSlider = ( numberProperty, label, selfVoicingLabel, changeSuccessDescription ) => {
+VoicingPreferencesDialog.createLabelledSlider = ( numberProperty, label, voicingLabel, changeSuccessDescription ) => {
   const slider = new GestureControlledSlider( numberProperty, numberProperty.range, {
-    selfVoicingLabel: selfVoicingLabel
+    voicingLabel: voicingLabel
   } );
 
   const utterance = new VoicingUtterance( {
@@ -247,7 +247,7 @@ class LevelModeControls extends VBox {
       spacing: 20
     } );
 
-    // self-voicing behavior for the checkboxes themselves
+    // voicing behavior for the checkboxes themselves
     levelsCheckboxGroup.children.forEach( child => {
       child.addInputListener( new VoicingInputListener( {
         onFocusIn: () => {
diff --git a/js/accessibility/speaker/VoicingQuickControl.js b/js/accessibility/speaker/VoicingQuickControl.js
index 0da6f2dc1..4a8abfd4d 100644
--- a/js/accessibility/speaker/VoicingQuickControl.js
+++ b/js/accessibility/speaker/VoicingQuickControl.js
@@ -1,7 +1,7 @@
 // Copyright 2020, University of Colorado Boulder
 
 /**
- * Controls that appear if self-voicing content is enabled. Allows user to mute all speech.
+ * Controls that appear if voicing content is enabled. Allows user to mute all speech.
  * Also has buttons to read other content.
  *
  * This is a prototype, and is still under active design and development.
@@ -34,7 +34,7 @@ import sceneryPhet from '../../sceneryPhet.js';
 import levelSpeakerModel from './levelSpeakerModel.js';
 import VoicingInputListener from './VoicingInputListener.js';
 
-// strings for self-voicing content - these should not be translatable and are therefore not
+// strings for voicing content - these should not be translatable and are therefore not
 // added to the strings file - I also don't know if "prototype" strings can go into translatable files
 // so keeping these here for now
 const hintPleaseString = 'Hint Please!';
@@ -45,10 +45,10 @@ const muteSpeechString = 'Mute Speech';
 const hideString = 'Hide';
 const showString = 'Show';
 const preferencesString = 'Preferences';
-const expandCollapseButtonPatternString = '{{action}} Self-Voicing Quick Menu';
-const selfVoicingQuickMenuShown = 'Read-me buttons & speech controls shown.';
-const selfVoicingQuickMenuHidden = 'Self-Voicing Quick Menu hidden.';
-const selfVoicingDialogAlert = 'Self Voicing Preferences shown.';
+const expandCollapseButtonPatternString = '{{action}} Voicing Quick Menu';
+const voicingQuickMenuShown = 'Read-me buttons & speech controls shown.';
+const voicingQuickMenuHidden = 'Voicing Quick Menu hidden.';
+const voicingDialogAlert = 'Voicing Preferences shown.';
 
 class VoicingQuickControl extends Node {
 
@@ -60,11 +60,11 @@ class VoicingQuickControl extends Node {
 
     options = merge( {
 
-      // {function} - Returns string, callback that creates the content for a self-voicing hint when the
+      // {function} - Returns string, callback that creates the content for a voicing hint when the
       // hint button is pressed
       createHintContent: () => '',
 
-      // {function} - Returns string, callback that creates the content for a self-voicing overview when the
+      // {function} - Returns string, callback that creates the content for a voicing overview when the
       // overview button is pressed
       createOverviewContent: () => '',
 
@@ -121,7 +121,7 @@ class VoicingQuickControl extends Node {
     } ) );
 
     openProperty.lazyLink( open => {
-      const response = open ? selfVoicingQuickMenuShown : selfVoicingQuickMenuHidden;
+      const response = open ? voicingQuickMenuShown : voicingQuickMenuHidden;
       phet.joist.sim.voicingUtteranceQueue.addToBack( levelSpeakerModel.collectResponses( response ) );
     } );
 
@@ -186,7 +186,7 @@ class VoicingQuickControl extends Node {
       // object response describing the open dialog - polite so the first focusable element
       // to be described
       const utterance = new VoicingUtterance( {
-        alert: selfVoicingDialogAlert,
+        alert: voicingDialogAlert,
         cancelOther: false
       } );
       phet.joist.sim.voicingUtteranceQueue.addToBack( utterance );
@@ -249,7 +249,7 @@ class VoicingQuickControl extends Node {
     } );
 
     // the quick menu can be hidden independently from user settings (the speech icon remains
-    // visible to indicate that self-voicing is enabled, but the menu button is removed)
+    // visible to indicate that voicing is enabled, but the menu button is removed)
     levelSpeakerModel.showQuickMenuProperty.link( visible => {
 
       // close the menu if we are making the button invisible
diff --git a/js/accessibility/speaker/VoicingWrapperNode.js b/js/accessibility/speaker/VoicingWrapperNode.js
index 32e300055..96b3b4a77 100644
--- a/js/accessibility/speaker/VoicingWrapperNode.js
+++ b/js/accessibility/speaker/VoicingWrapperNode.js
@@ -1,7 +1,7 @@
 // Copyright 2020, University of Colorado Boulder
 
 /**
  * Wraps a Node with another that is better for hit testing for the purposes
- * of the self-voicing prototype. Also adds a VoicingInputListener to the
+ * of the voicing prototype. Also adds a VoicingInputListener to the
  * Node so that it creates speech and highlighting
  *
  * @author Jesse Greenberg
diff --git a/js/accessibility/speaker/levelSpeakerModel.js b/js/accessibility/speaker/levelSpeakerModel.js
index 3f7b227bd..b1b0e5ce5 100644
--- a/js/accessibility/speaker/levelSpeakerModel.js
+++ b/js/accessibility/speaker/levelSpeakerModel.js
@@ -1,7 +1,7 @@
 // Copyright 2020, University of Colorado Boulder
 
 /**
- * A model for the "Speaking Levels" prototype of the self-voicing output. User can layer on different levels
+ * A model for the "Speaking Levels" prototype of the voicing output. User can layer on different levels
  * of helpful output. See the Properties below for the kinds of output that is added on in each level.
  *
  * This is a singleton model as it controls output for the entire simulation.
@@ -29,7 +29,7 @@ class LevelSpeakerModel {
     // appears around interactive nodes from mouse hover
     this.showHoverHighlightsProperty = new BooleanProperty( true );
 
-    // @public {BooleanProperty} - whether or not the "Self-Voicing Quick Menu" is visible
+    // @public {BooleanProperty} - whether or not the "Voicing Quick Menu" is visible
     // and available to the user
     this.showQuickMenuProperty = new BooleanProperty( true );
 
diff --git a/js/accessibility/speaker/speakerHighlighter.js b/js/accessibility/speaker/speakerHighlighter.js
index 31781ea75..f2a4617c2 100644
--- a/js/accessibility/speaker/speakerHighlighter.js
+++ b/js/accessibility/speaker/speakerHighlighter.js
@@ -1,7 +1,7 @@
 // Copyright 2020, University of Colorado Boulder
 
 /**
- * Manages highlights to indicate the state of self-voicing speech, as well as what objects have self-voicing content.
+ * Manages highlights to indicate the state of voicing speech, as well as what objects have voicing content.
  * This is coupled with VoicingInputListener, which updates the Properties tracking the pointer's over Trail.
  *
  * Very rough, prototype code. Uncertain whether this design will be around long-term.
@@ -82,7 +82,7 @@ class SpeakerHighlighter {
     options.display.addInputListener( {
       down: event => {
 
-        // in the self-voicing prototype we want the focus highlight to remain with
+        // in the voicing prototype we want the focus highlight to remain with
         // mouse/touch presses, only if 'interactive highlights' or custom gestures are enabled
         if ( !levelSpeakerModel.showHoverHighlightsProperty.get() && !levelSpeakerModel.gestureControlProperty.get() ) {
           Display.focus = null;
@@ -90,7 +90,7 @@ class SpeakerHighlighter {
       }
     } );
 
-    // activate highlights for self-voicing
+    // activate highlights for voicing
    Property.multilink( [ this.overTrailProperty, this.speakingTrailProperty ], ( overTrail, speakingTrail ) => {
       if ( this.enabledProperty.get() ) {
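
Usage note (illustrative, not part of the patch): the option comments in
VoicingInputListener.js above describe a listener with onPress and onFocusIn
callbacks, an optional representedNode whose visibility gates speech output, a
highlightTarget, and a dependence on phet.joist.sim.voicingUtteranceQueue. A
minimal sketch of how such a listener might be attached to a scenery Node
follows. It is assembled only from the option names visible in this diff:
resetButton, the spoken strings, and the import paths are hypothetical, not a
confirmed part of the scenery-phet API.

  // Hypothetical usage sketch - import paths are illustrative only.
  import Node from '../../../scenery/js/nodes/Node.js';
  import VoicingInputListener from './VoicingInputListener.js';

  // A hypothetical interactive Node that should produce speech (assumption).
  const resetButton = new Node( { tagName: 'button', focusable: true } );

  resetButton.addInputListener( new VoicingInputListener( {

    // speak an object response when the Node is pressed
    onPress: () => {
      phet.joist.sim.voicingUtteranceQueue.addToBack( 'Simulation reset.' );
    },

    // speak the Node's name when it receives keyboard focus
    onFocusIn: () => {
      phet.joist.sim.voicingUtteranceQueue.addToBack( 'Reset Sim button' );
    },

    // per the option comments in the diff: no speech is generated while this
    // Node is not visible in a display
    representedNode: resetButton,

    // the Node that receives the voicing highlight (assumption: same Node)
    highlightTarget: resetButton
  } ) );

As VoicingQuickControl.js does above, a response could also be passed through
levelSpeakerModel.collectResponses( response ) before being queued, which
presumably filters the output against the user's chosen speech output levels.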