waitfor.go

package linodego

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/linode/linodego/internal/kubernetes"
	"github.com/linode/linodego/pkg/condition"
)

// WaitForInstanceStatus waits for the Linode instance to reach the desired state
// before returning. It will time out with an error after timeoutSeconds.
func (client Client) WaitForInstanceStatus(ctx context.Context, instanceID int, status InstanceStatus, timeoutSeconds int) (*Instance, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
	defer cancel()

	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			instance, err := client.GetInstance(ctx, instanceID)
			if err != nil {
				return instance, err
			}
			complete := (instance.Status == status)
			if complete {
				return instance, nil
			}
		case <-ctx.Done():
			return nil, fmt.Errorf("Error waiting for Instance %d status %s: %s", instanceID, status, ctx.Err())
		}
	}
}
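
// A minimal usage sketch (not part of the original file; ctx, client, and
// instanceID are hypothetical placeholders from a consumer of this package).
// It waits up to 240 seconds for an instance to reach the running state:
//
//	instance, err := client.WaitForInstanceStatus(ctx, instanceID, linodego.InstanceRunning, 240)
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("instance %d reached status %s", instance.ID, instance.Status)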

// WaitForInstanceDiskStatus waits for the Linode instance disk to reach the desired state
// before returning. It will time out with an error after timeoutSeconds.
func (client Client) WaitForInstanceDiskStatus(ctx context.Context, instanceID int, diskID int, status DiskStatus, timeoutSeconds int) (*InstanceDisk, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
	defer cancel()

	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// GetInstanceDisk will 404 on newly created disks, so use List instead.
			// disk, err := client.GetInstanceDisk(ctx, instanceID, diskID)
			disks, err := client.ListInstanceDisks(ctx, instanceID, nil)
			if err != nil {
				return nil, err
			}

			for _, disk := range disks {
				disk := disk
				if disk.ID == diskID {
					complete := (disk.Status == status)
					if complete {
						return &disk, nil
					}

					break
				}
			}
		case <-ctx.Done():
			return nil, fmt.Errorf("Error waiting for Instance %d Disk %d status %s: %s", instanceID, diskID, status, ctx.Err())
		}
	}
}
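
// A hypothetical sketch pairing disk creation with this waiter (ctx, client,
// and instanceID are placeholders). Because GetInstanceDisk can 404 on a
// brand-new disk, WaitForInstanceDiskStatus is the safer follow-up call:
//
//	disk, err := client.CreateInstanceDisk(ctx, instanceID, linodego.InstanceDiskCreateOptions{
//		Label:      "example",
//		Size:       1024,
//		Filesystem: "ext4",
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	disk, err = client.WaitForInstanceDiskStatus(ctx, instanceID, disk.ID, linodego.DiskReady, 180)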

// WaitForVolumeStatus waits for the Volume to reach the desired state
// before returning. It will time out with an error after timeoutSeconds.
func (client Client) WaitForVolumeStatus(ctx context.Context, volumeID int, status VolumeStatus, timeoutSeconds int) (*Volume, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
	defer cancel()

	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			volume, err := client.GetVolume(ctx, volumeID)
			if err != nil {
				return volume, err
			}
			complete := (volume.Status == status)
			if complete {
				return volume, nil
			}
		case <-ctx.Done():
			return nil, fmt.Errorf("Error waiting for Volume %d status %s: %s", volumeID, status, ctx.Err())
		}
	}
}

// WaitForSnapshotStatus waits for the Snapshot to reach the desired state
// before returning. It will time out with an error after timeoutSeconds.
func (client Client) WaitForSnapshotStatus(ctx context.Context, instanceID int, snapshotID int, status InstanceSnapshotStatus, timeoutSeconds int) (*InstanceSnapshot, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
	defer cancel()

	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			snapshot, err := client.GetInstanceSnapshot(ctx, instanceID, snapshotID)
			if err != nil {
				return snapshot, err
			}
			complete := (snapshot.Status == status)
			if complete {
				return snapshot, nil
			}
		case <-ctx.Done():
			return nil, fmt.Errorf("Error waiting for Instance %d Snapshot %d status %s: %s", instanceID, snapshotID, status, ctx.Err())
		}
	}
}

// WaitForVolumeLinodeID waits for the Volume to match the desired LinodeID
// before returning. An active Instance will not immediately attach or detach a volume,
// so the LinodeID must be polled to determine volume readiness from the API.
// WaitForVolumeLinodeID will time out with an error after timeoutSeconds.
func (client Client) WaitForVolumeLinodeID(ctx context.Context, volumeID int, linodeID *int, timeoutSeconds int) (*Volume, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
	defer cancel()

	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			volume, err := client.GetVolume(ctx, volumeID)
			if err != nil {
				return volume, err
			}

			switch {
			case linodeID == nil && volume.LinodeID == nil:
				return volume, nil
			case linodeID == nil || volume.LinodeID == nil:
				// continue waiting
			case *volume.LinodeID == *linodeID:
				return volume, nil
			}
		case <-ctx.Done():
			return nil, fmt.Errorf("Error waiting for Volume %d to have Instance %v: %s", volumeID, linodeID, ctx.Err())
		}
	}
}
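
// A hedged attach/detach sketch (ctx, client, volumeID, and linodeID are
// placeholders). Passing a non-nil pointer waits for the volume to report that
// attachment; passing nil waits for a detach to settle:
//
//	// after client.AttachVolume(...)
//	volume, err := client.WaitForVolumeLinodeID(ctx, volumeID, &linodeID, 300)
//
//	// after client.DetachVolume(...)
//	volume, err = client.WaitForVolumeLinodeID(ctx, volumeID, nil, 300)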

// WaitForLKEClusterStatus waits for the LKECluster to reach the desired state
// before returning. It will time out with an error after timeoutSeconds.
func (client Client) WaitForLKEClusterStatus(ctx context.Context, clusterID int, status LKEClusterStatus, timeoutSeconds int) (*LKECluster, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
	defer cancel()

	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			cluster, err := client.GetLKECluster(ctx, clusterID)
			if err != nil {
				return cluster, err
			}
			complete := (cluster.Status == status)
			if complete {
				return cluster, nil
			}
		case <-ctx.Done():
			return nil, fmt.Errorf("Error waiting for Cluster %d status %s: %s", clusterID, status, ctx.Err())
		}
	}
}

// LKEClusterPollOptions configures polls against LKE Clusters.
type LKEClusterPollOptions struct {
	// TimeoutSeconds is the number of seconds to wait for the poll to succeed
	// before exiting.
	TimeoutSeconds int

	// TransportWrapper allows adding a transport middleware function that will
	// wrap the LKE Cluster client's underlying http.RoundTripper.
	TransportWrapper func(http.RoundTripper) http.RoundTripper
}
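
// A sketch of constructing these options (the logging wrapper below is
// hypothetical, not part of this package). A TransportWrapper can instrument
// every HTTP call the LKE Kubernetes client makes:
//
//	opts := linodego.LKEClusterPollOptions{
//		TimeoutSeconds: 600,
//		TransportWrapper: func(rt http.RoundTripper) http.RoundTripper {
//			return myLoggingRoundTripper{next: rt} // hypothetical middleware
//		},
//	}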

// WaitForLKEClusterConditions waits for the given LKE conditions to be true.
func (client Client) WaitForLKEClusterConditions(
	ctx context.Context,
	clusterID int,
	options LKEClusterPollOptions,
	conditions ...condition.ClusterConditionFunc,
) error {
	ctx, cancel := context.WithCancel(ctx)
	if options.TimeoutSeconds != 0 {
		ctx, cancel = context.WithTimeout(ctx, time.Duration(options.TimeoutSeconds)*time.Second)
	}
	defer cancel()

	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
	defer ticker.Stop()

	var prevLog string
	var clientset kubernetes.Clientset

	// clientReady is the implicit first condition: it lazily fetches the
	// cluster's kubeconfig and builds a Kubernetes client from it.
	clientReady := func(ctx context.Context, c kubernetes.Clientset) (bool, error) {
		if clientset == nil {
			resp, err := client.GetLKEClusterKubeconfig(ctx, clusterID)
			if err != nil {
				return false, fmt.Errorf("failed to get Kubeconfig for LKE cluster %d: %s", clusterID, err)
			}

			clientset, err = kubernetes.BuildClientsetFromConfigBytes(ctx, resp.KubeConfig, options.TransportWrapper)
			if err != nil {
				return false, fmt.Errorf("failed to build client for LKE cluster %d: %s", clusterID, err)
			}
			log.Printf("[INFO] successfully built client for LKE cluster %d\n", clusterID)
		}
		return true, nil
	}

	clusterConditions := append([]condition.ClusterConditionFunc{clientReady}, conditions...)
	for _, condition := range clusterConditions {
	ConditionSucceeded:
		for {
			select {
			case <-ticker.C:
				result, err := condition(ctx, clientset)
				if err != nil {
					// only log each distinct error once
					if err.Error() != prevLog {
						prevLog = err.Error()
						log.Printf("[ERROR] %s\n", err)
					}
				}

				if result {
					break ConditionSucceeded
				}
			case <-ctx.Done():
				return fmt.Errorf("Error waiting for cluster %d conditions: %s", clusterID, ctx.Err())
			}
		}
	}
	return nil
}
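
// A sketch of a custom condition (hypothetical; it assumes the internal
// kubernetes.Clientset exposes the standard client-go CoreV1 surface, which
// this file does not show). Any function matching condition.ClusterConditionFunc
// can be appended after the built-in clientReady check:
//
//	hasKubeSystem := func(ctx context.Context, c kubernetes.Clientset) (bool, error) {
//		_, err := c.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
//		return err == nil, nil // keep polling until the namespace is visible
//	}
//	err := client.WaitForLKEClusterConditions(ctx, clusterID, opts, hasKubeSystem)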

// WaitForLKEClusterReady polls with a given timeout for the LKE Cluster's api-server
// to be healthy and for the cluster to have at least one node with the NodeReady
// condition true.
func (client Client) WaitForLKEClusterReady(ctx context.Context, clusterID int, options LKEClusterPollOptions) error {
	return client.WaitForLKEClusterConditions(ctx, clusterID, options, condition.ClusterHasReadyNode)
}
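
// A minimal readiness sketch (ctx, client, and clusterID are placeholders):
//
//	if err := client.WaitForLKEClusterReady(ctx, clusterID, linodego.LKEClusterPollOptions{
//		TimeoutSeconds: 600,
//	}); err != nil {
//		log.Fatal(err)
//	}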

// WaitForEventFinished waits for an entity action to reach the 'finished' state
// before returning. It will time out with an error after timeoutSeconds.
// If the event indicates a failure, both the failed event and the error will be returned.
// nolint
func (client Client) WaitForEventFinished(ctx context.Context, id interface{}, entityType EntityType, action EventAction, minStart time.Time, timeoutSeconds int) (*Event, error) {
	titledEntityType := strings.Title(string(entityType))
	filterStruct := map[string]interface{}{
		// The API does not filter events by action.
		//"action": action,

		// Created is not correctly filtered by the API.
		// We'll have to verify these values manually, for now.
		//"created": map[string]interface{}{
		//	"+gte": minStart.Format(time.RFC3339),
		//},

		// With potentially 1000+ events coming back, we should filter on something.
		// Warning: This optimization has the potential to break if users are clearing
		// events before we see them.
		"seen": false,

		// Float the latest events to page 1
		"+order_by": "created",
		"+order":    "desc",
	}

	// Optimistically restrict results to page 1. We should remove this when more
	// precise filtering options exist.
	pages := 1

	// The API has limited filtering support for Event ID and Event Type.
	// Optimize the list, if possible.
	switch entityType {
	case EntityDisk, EntityLinode, EntityDomain, EntityNodebalancer:
		// All of the filter-supported entity types have int IDs
		filterableEntityID, err := strconv.Atoi(fmt.Sprintf("%v", id))
		if err != nil {
			return nil, fmt.Errorf("Error parsing Entity ID %q for optimized WaitForEventFinished EventType %q: %s", id, entityType, err)
		}
		filterStruct["entity.id"] = filterableEntityID
		filterStruct["entity.type"] = entityType

		// TODO: are we comfortable with pages = 0 with the event type and id filter?
	}

	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
	defer cancel()

	if deadline, ok := ctx.Deadline(); ok {
		duration := time.Until(deadline)
		log.Printf("[INFO] Waiting %d seconds for %s events since %v for %s %v", int(duration.Seconds()), action, minStart, titledEntityType, id)
	}

	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
	defer ticker.Stop()

	// avoid repeating log messages
	nextLog := ""
	lastLog := ""
	lastEventID := 0

	for {
		select {
		case <-ticker.C:
			if lastEventID > 0 {
				filterStruct["id"] = map[string]interface{}{
					"+gte": lastEventID,
				}
			}

			filter, err := json.Marshal(filterStruct)
			if err != nil {
				return nil, err
			}
			listOptions := NewListOptions(pages, string(filter))

			events, err := client.ListEvents(ctx, listOptions)
			if err != nil {
				return nil, err
			}

			// If there are events for this instance + action, inspect them
			for _, event := range events {
				event := event

				if event.Action != action {
					// log.Println("action mismatch", event.Action, action)
					continue
				}
				if event.Entity == nil || event.Entity.Type != entityType {
					// log.Println("type mismatch", event.Entity.Type, entityType)
					continue
				}

				var entID string
				switch id := event.Entity.ID.(type) {
				case float64, float32:
					entID = fmt.Sprintf("%.f", id)
				case int:
					entID = strconv.Itoa(id)
				default:
					entID = fmt.Sprintf("%v", id)
				}

				var findID string
				switch id := id.(type) {
				case float64, float32:
					findID = fmt.Sprintf("%.f", id)
				case int:
					findID = strconv.Itoa(id)
				default:
					findID = fmt.Sprintf("%v", id)
				}

				if entID != findID {
					// log.Println("id mismatch", entID, findID)
					continue
				}

				// @TODO(displague) This event.Created check shouldn't be needed, but it appears
				// that the ListEvents method is not populating it correctly
				if event.Created == nil {
					log.Printf("[WARN] event.Created is nil when API returned: %#+v", event.Created)
				} else if *event.Created != minStart && !event.Created.After(minStart) {
					// Not the event we were looking for
					// log.Println(event.Created, "is not >=", minStart)
					continue
				}

				// This is the event we are looking for. Save our place.
				if lastEventID == 0 {
					lastEventID = event.ID
				}

				switch event.Status {
				case EventFailed:
					return &event, fmt.Errorf("%s %v action %s failed", titledEntityType, id, action)
				case EventFinished:
					log.Printf("[INFO] %s %v action %s is finished", titledEntityType, id, action)
					return &event, nil
				}
				// TODO(displague) can we bump the ticker to TimeRemaining/2 (>=1) when non-nil?
				nextLog = fmt.Sprintf("[INFO] %s %v action %s is %s", titledEntityType, id, action, event.Status)
			}

			// de-dupe logging statements
			if nextLog != lastLog {
				log.Print(nextLog)
				lastLog = nextLog
			}
		case <-ctx.Done():
			return nil, fmt.Errorf("Error waiting for Event Status '%s' of %s %v action '%s': %s", EventFinished, titledEntityType, id, action, ctx.Err())
		}
	}
}
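
// A hypothetical end-to-end sketch (ctx, client, and instance are placeholders):
// record a start time, kick off an action, then wait for the matching event to
// finish before proceeding.
//
//	start := time.Now()
//	if err := client.RebootInstance(ctx, instance.ID, 0); err != nil {
//		log.Fatal(err)
//	}
//	event, err := client.WaitForEventFinished(ctx, instance.ID, linodego.EntityLinode,
//		linodego.ActionLinodeReboot, start, 240)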