Spelling (#9778)
* spelling: aggressive

Signed-off-by: Josh Soref <[email protected]>

* spelling: been

Signed-off-by: Josh Soref <[email protected]>

* spelling: concur

Signed-off-by: Josh Soref <[email protected]>

* spelling: concurrency

Signed-off-by: Josh Soref <[email protected]>

* spelling: conditions

Signed-off-by: Josh Soref <[email protected]>

* spelling: configuration

Signed-off-by: Josh Soref <[email protected]>

* spelling: configures

Signed-off-by: Josh Soref <[email protected]>

* spelling: controller

Signed-off-by: Josh Soref <[email protected]>

* spelling: constraints

Signed-off-by: Josh Soref <[email protected]>

* spelling: decrease

Signed-off-by: Josh Soref <[email protected]>

* spelling: decisions

Signed-off-by: Josh Soref <[email protected]>

* spelling: determine

Signed-off-by: Josh Soref <[email protected]>

* spelling: endpoints

Signed-off-by: Josh Soref <[email protected]>

* spelling: exactly

Signed-off-by: Josh Soref <[email protected]>

* spelling: immediately

Signed-off-by: Josh Soref <[email protected]>

* spelling: individual

Signed-off-by: Josh Soref <[email protected]>

* spelling: install

Signed-off-by: Josh Soref <[email protected]>

* spelling: initial

Signed-off-by: Josh Soref <[email protected]>

* spelling: manager

Signed-off-by: Josh Soref <[email protected]>

* spelling: mayonnaise

Signed-off-by: Josh Soref <[email protected]>

* spelling: namespace

Signed-off-by: Josh Soref <[email protected]>

* spelling: outside

Signed-off-by: Josh Soref <[email protected]>

* spelling: overwrite

Signed-off-by: Josh Soref <[email protected]>

* spelling: panicking

Signed-off-by: Josh Soref <[email protected]>

* spelling: percentage

Signed-off-by: Josh Soref <[email protected]>

* spelling: provided

Signed-off-by: Josh Soref <[email protected]>

* spelling: processes

Signed-off-by: Josh Soref <[email protected]>

* spelling: properties

Signed-off-by: Josh Soref <[email protected]>

* spelling: protocol

Signed-off-by: Josh Soref <[email protected]>

* spelling: recorder

Signed-off-by: Josh Soref <[email protected]>

* spelling: reinstalling

Signed-off-by: Josh Soref <[email protected]>

* spelling: remnants

Signed-off-by: Josh Soref <[email protected]>

* spelling: request

Signed-off-by: Josh Soref <[email protected]>

* spelling: return

Signed-off-by: Josh Soref <[email protected]>

* spelling: requests

Signed-off-by: Josh Soref <[email protected]>

* spelling: revision

Signed-off-by: Josh Soref <[email protected]>

* spelling: stdin

Signed-off-by: Josh Soref <[email protected]>

* spelling: sidecar

Signed-off-by: Josh Soref <[email protected]>

* spelling: stabilize

Signed-off-by: Josh Soref <[email protected]>

* spelling: stabilized

Signed-off-by: Josh Soref <[email protected]>

* spelling: startup

Signed-off-by: Josh Soref <[email protected]>

* spelling: statmessage

Signed-off-by: Josh Soref <[email protected]>

* spelling: verifying

Signed-off-by: Josh Soref <[email protected]>

* spelling: windermere

Signed-off-by: Josh Soref <[email protected]>

* spelling: windows

Signed-off-by: Josh Soref <[email protected]>

* chore: update hashes

Signed-off-by: Josh Soref <[email protected]>
jsoref authored Oct 16, 2020
1 parent dad97fb commit 1c8ec8f
Showing 54 changed files with 82 additions and 82 deletions.
2 changes: 1 addition & 1 deletion cmd/queue/main_test.go
@@ -149,7 +149,7 @@ func TestQueueTraceSpans(t *testing.T) {
 h := queue.ProxyHandler(breaker, network.NewRequestStats(time.Now()), true /*tracingEnabled*/, proxy)
 h(writer, req)
 } else {
-h := health.ProbeHandler(healthState, tc.prober, true /* isAggresive*/, true /*tracingEnabled*/, nil)
+h := health.ProbeHandler(healthState, tc.prober, true /* isAggressive*/, true /*tracingEnabled*/, nil)
 req.Header.Set(network.ProbeHeaderName, tc.requestHeader)
 h(writer, req)
 }
6 changes: 3 additions & 3 deletions config/core/configmaps/autoscaler.yaml
@@ -20,7 +20,7 @@ metadata:
 labels:
 serving.knative.dev/release: devel
 annotations:
-knative.dev/example-checksum: "12baeac1"
+knative.dev/example-checksum: "1d830d9e"
 data:
 _example: |
 ################################
@@ -153,11 +153,11 @@ data:
 # Scale to zero pod retention period defines the minimum amount
 # of time the last pod will remain after Autoscaler has decided to
 # scale to zero.
-# This flag is for the situations where the pod starup is very expensive
+# This flag is for the situations where the pod startup is very expensive
 # and the traffic is bursty (requiring smaller windows for fast action),
 # but patchy.
 # The larger of this flag and `scale-to-zero-grace-period` will effectively
-# detemine how the last pod will hang around.
+# determine how the last pod will hang around.
 scale-to-zero-pod-retention-period: "0s"
 # pod-autoscaler-class specifies the default pod autoscaler class
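
The comment fixed in the hunk above documents a specific rule: the larger of `scale-to-zero-grace-period` and `scale-to-zero-pod-retention-period` decides how long the last pod lingers after the Autoscaler chooses to scale to zero. A minimal Go sketch of that rule, assuming only what the comment states (the helper name `lastPodKeepAlive` is hypothetical, not a Knative API):

package main

import (
	"fmt"
	"time"
)

// lastPodKeepAlive illustrates the documented semantics: the larger of
// the grace period and the retention period determines how long the
// last pod hangs around after a scale-to-zero decision.
func lastPodKeepAlive(gracePeriod, retentionPeriod time.Duration) time.Duration {
	if retentionPeriod > gracePeriod {
		return retentionPeriod
	}
	return gracePeriod
}

func main() {
	// With the default retention of "0s", the grace period wins.
	fmt.Println(lastPodKeepAlive(30*time.Second, 0)) // 30s
	// A long retention period, set for expensive pod startup, wins instead.
	fmt.Println(lastPodKeepAlive(30*time.Second, 5*time.Minute)) // 5m0s
}
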
4 changes: 2 additions & 2 deletions config/core/configmaps/observability.yaml
@@ -20,7 +20,7 @@ metadata:
 labels:
 serving.knative.dev/release: devel
 annotations:
-knative.dev/example-checksum: "3af7fc85"
+knative.dev/example-checksum: "97c1d10b"
 data:
 _example: |
 ################################
@@ -81,7 +81,7 @@ data:
 logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}'
 # If true, the request logging will be enabled.
-# NB: up to and including Knative version 0.18 if logging.requst-log-template is non-empty, this value
+# NB: up to and including Knative version 0.18 if logging.request-log-template is non-empty, this value
 # will be ignored.
 logging.enable-request-log: "false"
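
The NB being corrected above encodes a precedence rule for request logging. A small Go sketch of how that precedence reads, with the post-0.18 behavior (flag must be true and a template must exist) stated as an assumption drawn from the surrounding comments rather than from the Knative source:

package main

import "fmt"

// requestLogEnabled sketches the documented precedence: on Knative 0.18
// and earlier, a non-empty logging.request-log-template forces request
// logging on and logging.enable-request-log is ignored; on later
// releases the boolean is honored (assumption: a template is still
// required for anything to be logged).
func requestLogEnabled(v018OrEarlier bool, template string, enableFlag bool) bool {
	if v018OrEarlier && template != "" {
		return true // enable-request-log is ignored here
	}
	return enableFlag && template != ""
}

func main() {
	fmt.Println(requestLogEnabled(true, `{"httpRequest": ...}`, false))  // true: flag ignored on <= 0.18
	fmt.Println(requestLogEnabled(false, `{"httpRequest": ...}`, false)) // false: flag honored afterwards
}
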
4 changes: 2 additions & 2 deletions docs/roadmap/scaling-2019.md
@@ -19,7 +19,7 @@ on the CI systems and to avoid unwanted regressions.
 
 As a serverless framework, Knative should only run code when it needs to.
 Including scaling to zero when the Revision is not being used. However the
-Revison must also come back quickly, otherwise the illusion of "serverless" is
+Revision must also come back quickly, otherwise the illusion of "serverless" is
 broken--it must seem as if it was always there. Generally less than one second
 is a good start.
 
@@ -153,7 +153,7 @@ but doesn't yet. For example, enforcing single-threaded request or reporting
 concurrency metrics in the way we want. Ultimately we should push these features
 upstream and get rid of the queue-proxy sidecar.
 
-However we're not doing that yet because the requirements haven't stablized
+However we're not doing that yet because the requirements haven't stabilized
 enough yet. And it's still useful to have a component to innovate within.
 
 ### Vertical Pod Autoscaling Beta
2 changes: 1 addition & 1 deletion pkg/activator/net/revision_backends.go
@@ -109,7 +109,7 @@ type revisionWatcher struct {
 logger *zap.SugaredLogger
 
 // podsAddressable will be set to false if we cannot
-// probe a pod directly, but its cluster IP has beeen successfully probed.
+// probe a pod directly, but its cluster IP has been successfully probed.
 podsAddressable bool
 }
 
10 changes: 5 additions & 5 deletions pkg/activator/net/revision_backends_test.go
@@ -98,7 +98,7 @@ func privateSKSService(revID types.NamespacedName, clusterIP string, ports []cor
 }
 }
 
-func waitForRevisionBackedMananger(t *testing.T, rbm *revisionBackendsManager) {
+func waitForRevisionBackedManager(t *testing.T, rbm *revisionBackendsManager) {
 timeout := time.After(updateTimeout)
 for {
 select {
@@ -715,7 +715,7 @@ func TestRevisionBackendManagerAddEndpoint(t *testing.T) {
 defer func() {
 cancel()
 waitInformers()
-waitForRevisionBackedMananger(t, rbm)
+waitForRevisionBackedManager(t, rbm)
 }()
 
 for _, ep := range tc.endpointsArr {
@@ -1173,7 +1173,7 @@ func TestRevisionDeleted(t *testing.T) {
 defer func() {
 cancel()
 waitInformers()
-waitForRevisionBackedMananger(t, rbm)
+waitForRevisionBackedManager(t, rbm)
 }()
 
 // Make some movements.
@@ -1229,7 +1229,7 @@ func TestServiceDoesNotExist(t *testing.T) {
 defer func() {
 cancel()
 waitInformers()
-waitForRevisionBackedMananger(t, rbm)
+waitForRevisionBackedManager(t, rbm)
 }()
 
 // Make some movements to generate a checkDests call.
@@ -1293,7 +1293,7 @@ func TestServiceMoreThanOne(t *testing.T) {
 defer func() {
 cancel()
 waitInformers()
-waitForRevisionBackedMananger(t, rbm)
+waitForRevisionBackedManager(t, rbm)
 }()
 
 ei.Informer().GetIndexer().Add(eps)
6 changes: 3 additions & 3 deletions pkg/activator/net/throttler.go
@@ -488,8 +488,8 @@ func NewThrottler(ctx context.Context, ipAddr string) *Throttler {
 FilterFunc: reconciler.LabelFilterFunc(networking.ServiceTypeKey,
 string(networking.ServiceTypePublic), false),
 Handler: cache.ResourceEventHandlerFuncs{
-AddFunc: t.publicEndspointsUpdated,
-UpdateFunc: controller.PassNew(t.publicEndspointsUpdated),
+AddFunc: t.publicEndpointsUpdated,
+UpdateFunc: controller.PassNew(t.publicEndpointsUpdated),
 },
 })
 return t
@@ -666,7 +666,7 @@ func inferIndex(eps []string, ipAddress string) int {
 return idx
 }
 
-func (t *Throttler) publicEndspointsUpdated(newObj interface{}) {
+func (t *Throttler) publicEndpointsUpdated(newObj interface{}) {
 endpoints := newObj.(*corev1.Endpoints)
 t.logger.Info("Updated public Endpoints: ", endpoints.Name)
 t.epsUpdateCh <- endpoints
2 changes: 1 addition & 1 deletion pkg/activator/net/throttler_test.go
@@ -1062,7 +1062,7 @@ func TestPickIndices(t *testing.T) {
 t.Errorf("EndIndex = %d, want: %d", got, want)
 }
 if got, want := rem, test.wantR; got != want {
-t.Errorf("Remanants = %d, want: %d", got, want)
+t.Errorf("Remnants = %d, want: %d", got, want)
 }
 })
 }
14 changes: 7 additions & 7 deletions pkg/apis/autoscaling/annotation_validation_test.go
@@ -152,30 +152,30 @@ func TestValidateAnnotations(t *testing.T) {
 isInCreate: true,
 annotations: map[string]string{MaxScaleAnnotationKey: "9"},
 }, {
-name: "panic window percentange bad",
+name: "panic window percentage bad",
 annotations: map[string]string{PanicWindowPercentageAnnotationKey: "-1"},
 expectErr: "expected 1 <= -1 <= 100: " + PanicWindowPercentageAnnotationKey,
 }, {
-name: "panic window percentange bad2",
+name: "panic window percentage bad2",
 annotations: map[string]string{PanicWindowPercentageAnnotationKey: "202"},
 expectErr: "expected 1 <= 202 <= 100: " + PanicWindowPercentageAnnotationKey,
 }, {
-name: "panic window percentange bad3",
+name: "panic window percentage bad3",
 annotations: map[string]string{PanicWindowPercentageAnnotationKey: "fifty"},
 expectErr: "invalid value: fifty: " + PanicWindowPercentageAnnotationKey,
 }, {
-name: "panic window percentange good",
+name: "panic window percentage good",
 annotations: map[string]string{PanicThresholdPercentageAnnotationKey: "210"},
 }, {
-name: "panic threshold percentange bad2",
+name: "panic threshold percentage bad2",
 annotations: map[string]string{PanicThresholdPercentageAnnotationKey: "109"},
 expectErr: "expected 110 <= 109 <= 1000: " + PanicThresholdPercentageAnnotationKey,
 }, {
-name: "panic threshold percentange bad2.5",
+name: "panic threshold percentage bad2.5",
 annotations: map[string]string{PanicThresholdPercentageAnnotationKey: "10009"},
 expectErr: "expected 110 <= 10009 <= 1000: " + PanicThresholdPercentageAnnotationKey,
 }, {
-name: "panic threshold percentange bad3",
+name: "panic threshold percentage bad3",
 annotations: map[string]string{PanicThresholdPercentageAnnotationKey: "fifty"},
 expectErr: "invalid value: fifty: " + PanicThresholdPercentageAnnotationKey,
 }, {
2 changes: 1 addition & 1 deletion pkg/apis/autoscaling/v1alpha1/pa_validation_test.go
@@ -59,7 +59,7 @@ func TestPodAutoscalerSpecValidation(t *testing.T) {
 },
 want: apis.ErrMissingField("protocolType"),
 }, {
-name: "protcol type invalid",
+name: "protocol type invalid",
 rs: &PodAutoscalerSpec{
 ContainerConcurrency: 0,
 ScaleTargetRef: corev1.ObjectReference{
2 changes: 1 addition & 1 deletion pkg/apis/serving/k8s_lifecycle.go
@@ -48,7 +48,7 @@ func TransformDeploymentStatus(ds *appsv1.DeploymentStatus) *duckv1.Status {
 
 depCondSet.Manage(s).InitializeConditions()
 // The absence of this condition means no failure has occurred. If we find it
-// below, we'll ovewrwrite this.
+// below, we'll overwrite this.
 depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady)
 
 for _, cond := range ds.Conditions {
2 changes: 1 addition & 1 deletion pkg/apis/serving/k8s_validation.go
@@ -709,7 +709,7 @@ func WithinUserContainer(ctx context.Context) context.Context {
 // being validated.
 type sidecarContainer struct{}
 
-// WithinSidecatrContainer notes on the context that further validation or defaulting
+// WithinSidecarContainer notes on the context that further validation or defaulting
 // is within the context of a sidecar container in the revision.
 func WithinSidecarContainer(ctx context.Context) context.Context {
 return context.WithValue(ctx, sidecarContainer{}, struct{}{})
4 changes: 2 additions & 2 deletions pkg/autoscaler/aggregation/bucketing_test.go
@@ -247,7 +247,7 @@ func TestTimedFloat64BucketsWindowUpdate(t *testing.T) {
 t.Fatalf("Resized bucket count = %d, want: %d", got, want)
 }
 if got, want := buckets.window, 10*time.Second; got != want {
-t.Fatalf("Resized bucket windos = %v, want: %v", got, want)
+t.Fatalf("Resized bucket windows = %v, want: %v", got, want)
 }
 
 // Verify values were properly copied.
@@ -338,7 +338,7 @@ func TestTimedFloat64BucketsWindowUpdate3sGranularity(t *testing.T) {
 t.Fatalf("Resized bucket count = %d, want: %d", got, want)
 }
 if got, want := buckets.window, 10*time.Second; got != want {
-t.Fatalf("Resized bucket windos = %v, want: %v", got, want)
+t.Fatalf("Resized bucket windows = %v, want: %v", got, want)
 }
 
 // Verify values were properly copied.
2 changes: 1 addition & 1 deletion pkg/autoscaler/config/config.go
@@ -154,7 +154,7 @@ func validate(lc *autoscalerconfig.Config) (*autoscalerconfig.Config, error) {
 }
 
 // We can't permit stable window be less than our aggregation window for correctness.
-// Or too big, so that our desisions are too imprecise.
+// Or too big, so that our decisions are too imprecise.
 if lc.StableWindow < autoscaling.WindowMin || lc.StableWindow > autoscaling.WindowMax {
 return nil, fmt.Errorf("stable-window = %v, must be in [%v; %v] range", lc.StableWindow,
 autoscaling.WindowMin, autoscaling.WindowMax)
2 changes: 1 addition & 1 deletion pkg/autoscaler/config/config_test.go
@@ -104,7 +104,7 @@ func TestNewConfig(t *testing.T) {
 return c
 }(),
 }, {
-name: "concurrencty target percentage as percent",
+name: "concurrency target percentage as percent",
 input: map[string]string{
 "container-concurrency-target-percentage": "55",
 },
6 changes: 3 additions & 3 deletions pkg/autoscaler/metrics/collector.go
@@ -354,9 +354,9 @@ func (c *collection) lastError() error {
 func (c *collection) record(now time.Time, stat Stat) {
 // Proxied requests have been counted at the activator. Subtract
 // them to avoid double counting.
-concurr := stat.AverageConcurrentRequests - stat.AverageProxiedConcurrentRequests
-c.concurrencyBuckets.Record(now, concurr)
-c.concurrencyPanicBuckets.Record(now, concurr)
+concur := stat.AverageConcurrentRequests - stat.AverageProxiedConcurrentRequests
+c.concurrencyBuckets.Record(now, concur)
+c.concurrencyPanicBuckets.Record(now, concur)
 rps := stat.RequestCount - stat.ProxiedRequestCount
 c.rpsBuckets.Record(now, rps)
 c.rpsPanicBuckets.Record(now, rps)
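
The rename above sits in code whose comment explains the real invariant: requests proxied through the activator were already counted there, so the collector subtracts them before recording. A worked example of that subtraction with made-up numbers:

package main

import "fmt"

func main() {
	// Illustrative values: the pod reports 8.0 average concurrent
	// requests, 3.0 of which were proxied through (and already counted
	// at) the activator.
	avgConcurrent := 8.0
	avgProxied := 3.0

	// Recording the full 8.0 would count the proxied 3.0 twice: once at
	// the activator and once here.
	concur := avgConcurrent - avgProxied
	fmt.Println(concur) // 5: only the revision's own share is recorded
}
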
2 changes: 1 addition & 1 deletion pkg/autoscaler/metrics/stat_conversions.go
@@ -27,7 +27,7 @@ func (sm StatMessage) ToWireStatMessage() *WireStatMessage {
 }
 }
 
-// ToStatMessage converts the WireStatMessage to a Statmessage.
+// ToStatMessage converts the WireStatMessage to a StatMessage.
 // Nil-checks must have been done before calling this.
 func (wsm WireStatMessage) ToStatMessage() StatMessage {
 return StatMessage{
2 changes: 1 addition & 1 deletion pkg/autoscaler/metrics/stats_scraper.go
@@ -65,7 +65,7 @@ var (
 // stat from an unscraped pod
 ErrDidNotReceiveStat = errors.New("did not receive stat from an unscraped pod")
 
-// Sentinel error to retrun from pod scraping routine, when we could not
+// Sentinel error to return from pod scraping routine, when we could not
 // scrape even a single pod.
 errNoPodsScraped = errors.New("no pods scraped")
 errPodsExhausted = errors.New("pods exhausted")
4 changes: 2 additions & 2 deletions pkg/autoscaler/scaling/autoscaler_test.go
@@ -352,7 +352,7 @@ func TestAutoscalerStableModeNoTrafficScaleToZero(t *testing.T) {
 // QPS is increasing exponentially. Each scaling event bring concurrency
 // back to the target level (1.0) but then traffic continues to increase.
 // At 1296 QPS traffic stabilizes.
-func TestAutoscalerPanicModeExponentialTrackAndStablize(t *testing.T) {
+func TestAutoscalerPanicModeExponentialTrackAndStabilize(t *testing.T) {
 metrics := &metricClient{StableConcurrency: 6, PanicConcurrency: 6}
 a, pc := newTestAutoscaler(t, 1, 101, metrics)
 na := expectedNA(a, 1)
@@ -477,7 +477,7 @@ func TestAutoscalerScale(t *testing.T) {
 wantScale: 90,
 wantEBC: expectedEBC(10, 1982, 1, 100),
 }, {
-label: "AutoscalerStableModeDecreseNonReachable",
+label: "AutoscalerStableModeDecreaseNonReachable",
 as: newTestAutoscalerNoPC(t, 10 /* target */, 1982 /* TBC */, &metricClient{StableConcurrency: 1, PanicConcurrency: 1}),
 baseScale: 100,
 prepFunc: func(a *autoscaler) {
2 changes: 1 addition & 1 deletion pkg/autoscaler/statforwarder/forwarder.go
@@ -90,7 +90,7 @@ type Forwarder struct {
 // Used to capture asynchronous processes for re-enqueuing to be waited
 // on when shutting down.
 retryWg sync.WaitGroup
-// Used to capture asynchronous processe for stats to be waited
+// Used to capture asynchronous processes for stats to be waited
 // on when shutting down.
 processingWg sync.WaitGroup
 
2 changes: 1 addition & 1 deletion pkg/queue/forwarded_shim.go
@@ -56,7 +56,7 @@ func ForwardedShimHandler(h http.Handler) http.Handler {
 
 func generateForwarded(xff, xfp, xfh string) string {
 fwd := &strings.Builder{}
-// The size is dominated by the side of the indiviual headers.
+// The size is dominated by the side of the individual headers.
 // + 5 + 1 for host= and delimiter
 // + 6 + 1 for proto= and delimiter
 // + (5 + 4) * x for each for= clause and delimiter (assuming ipv6)
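
The comment repaired above is part of a size estimate for assembling an RFC 7239 Forwarded header from the legacy X-Forwarded-For/Proto/Host headers (the for=, proto=, and host= clauses it enumerates). A deliberately simplified sketch of that kind of shim follows; it skips the quoting and IPv6 bracketing that the real size math accounts for, and shimForwarded is an illustrative name, not the Knative function:

package main

import (
	"fmt"
	"strings"
)

// shimForwarded combines legacy X-Forwarded-* values into one RFC 7239
// Forwarded header: a for= clause per client IP, then host= and proto=.
func shimForwarded(xff, xfp, xfh string) string {
	var clauses []string
	for _, ip := range strings.Split(xff, ",") {
		if ip = strings.TrimSpace(ip); ip != "" {
			clauses = append(clauses, "for="+ip)
		}
	}
	fwd := strings.Join(clauses, ", ")
	if xfh != "" {
		fwd += ";host=" + xfh
	}
	if xfp != "" {
		fwd += ";proto=" + xfp
	}
	return fwd
}

func main() {
	fmt.Println(shimForwarded("10.0.0.1, 10.0.0.2", "https", "example.com"))
	// for=10.0.0.1, for=10.0.0.2;host=example.com;proto=https
}
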
2 changes: 1 addition & 1 deletion pkg/queue/health/handler_test.go
@@ -80,7 +80,7 @@ func TestProbeHandler(t *testing.T) {
 req.Header.Set(network.ProbeHeaderName, tc.requestHeader)
 }
 
-h := ProbeHandler(healthState, tc.prober, true /* isAggresive*/, true /*tracingEnabled*/, incHandler)
+h := ProbeHandler(healthState, tc.prober, true /* isAggressive*/, true /*tracingEnabled*/, incHandler)
 h(writer, req)
 
 if got, want := writer.Code, tc.wantCode; got != want {
2 changes: 1 addition & 1 deletion pkg/reconciler/accessor/core/secret.go
@@ -42,7 +42,7 @@ type SecretAccessor interface {
 func ReconcileSecret(ctx context.Context, owner kmeta.Accessor, desired *corev1.Secret, accessor SecretAccessor) (*corev1.Secret, error) {
 recorder := controller.GetEventRecorder(ctx)
 if recorder == nil {
-return nil, fmt.Errorf("recoder for reconciling Secret %s/%s is not created", desired.Namespace, desired.Name)
+return nil, fmt.Errorf("recorder for reconciling Secret %s/%s is not created", desired.Namespace, desired.Name)
 }
 secret, err := accessor.GetSecretLister().Secrets(desired.Namespace).Get(desired.Name)
 if apierrs.IsNotFound(err) {
2 changes: 1 addition & 1 deletion pkg/reconciler/accessor/networking/certificate.go
@@ -44,7 +44,7 @@ func ReconcileCertificate(ctx context.Context, owner kmeta.Accessor, desired *v1
 
 recorder := controller.GetEventRecorder(ctx)
 if recorder == nil {
-return nil, fmt.Errorf("recoder for reconciling Certificate %s/%s is not created", desired.Namespace, desired.Name)
+return nil, fmt.Errorf("recorder for reconciling Certificate %s/%s is not created", desired.Namespace, desired.Name)
 }
 cert, err := certAccessor.GetCertificateLister().Certificates(desired.Namespace).Get(desired.Name)
 if apierrs.IsNotFound(err) {
2 changes: 1 addition & 1 deletion pkg/reconciler/revision/resources/deploy_test.go
@@ -972,7 +972,7 @@ func TestMakePodSpec(t *testing.T) {
 ),
 }),
 }, {
-name: "propertes allowed by the webhook are passed through",
+name: "properties allowed by the webhook are passed through",
 rev: revision("bar", "foo",
 withContainers([]corev1.Container{{
 Name: servingContainerName,
8 changes: 4 additions & 4 deletions pkg/reconciler/route/table_test.go
@@ -1265,7 +1265,7 @@ func TestReconcile(t *testing.T) {
 // The Route controller attaches our label to this Configuration.
 WithConfigLabel("serving.knative.dev/route", "ingress-mutation"),
 ),
-rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("windemere")),
+rev("default", "config", 1, MarkRevisionReady, WithRevName("config-00001"), WithServiceName("windermere")),
 mutateIngress(simpleReadyIngress(
 Route("default", "ingress-mutation", WithConfigTarget("config"), WithURL),
 &traffic.Config{
@@ -1309,7 +1309,7 @@ func TestReconcile(t *testing.T) {
 RevisionName: "config-00001",
 Percent: ptr.Int64(100),
 },
-ServiceName: "windemere",
+ServiceName: "windermere",
 Active: true,
 }},
 },
@@ -2905,7 +2905,7 @@ func cfg(namespace, name string, co ...ConfigOption) *v1.Configuration {
 }
 
 func simplePlaceholderK8sService(ctx context.Context, r *v1.Route, targetName string, so ...K8sServiceOption) *corev1.Service {
-// omit the error here, as we are sure the loadbalancer info is porvided.
+// omit the error here, as we are sure the loadbalancer info is provided.
 // return the service instance only, so that the result can be used in TableRow.
 svc, _ := resources.MakeK8sPlaceholderService(ctx, r, targetName)
 
@@ -2922,7 +2922,7 @@ func simpleK8sService(r *v1.Route, so ...K8sServiceOption) *corev1.Service {
 }
 ctx := cs.ToContext(context.Background())
 
-// omit the error here, as we are sure the loadbalancer info is porvided.
+// omit the error here, as we are sure the loadbalancer info is provided.
 // return the service instance only, so that the result can be used in TableRow.
 svc, _ := resources.MakeK8sService(ctx, r, "", &netv1alpha1.Ingress{Status: readyIngressStatus()}, false, "")
 
2 changes: 1 addition & 1 deletion pkg/reconciler/serverlessservice/global_resync_test.go
@@ -87,7 +87,7 @@ func TestGlobalResyncOnActivatorChange(t *testing.T) {
 defer func() {
 cancel()
 if err := grp.Wait(); err != nil {
-t.Fatal("Error waiting for contoller to terminate:", err)
+t.Fatal("Error waiting for controller to terminate:", err)
 }
 waitInformers()
 }()