forked from sourcegraph/sourcegraph-public-snapshot
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathservice.go
More file actions
322 lines (267 loc) · 14.4 KB
/
service.go
File metadata and controls
322 lines (267 loc) · 14.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
package uploads
import (
"context"
"fmt"
"time"
"github.com/derision-test/glock"
"github.com/opentracing/opentracing-go/log"
logger "github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/shared/types"
"github.com/sourcegraph/sourcegraph/internal/codeintel/uploads/internal/lsifstore"
"github.com/sourcegraph/sourcegraph/internal/codeintel/uploads/internal/store"
"github.com/sourcegraph/sourcegraph/internal/codeintel/uploads/shared"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
"github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker"
dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
"github.com/sourcegraph/sourcegraph/lib/codeintel/precise"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// Service exposes upload-related operations for code intelligence. Most
// methods are thin delegations to the underlying stores/clients, wrapped
// with observability (tracing + metrics) via the operations struct.
type Service struct {
	store             store.Store // upload metadata store; most methods delegate here
	repoStore         RepoStore
	workerutilStore   dbworkerstore.Store // backing store for the upload processor worker queue
	lsifstore         lsifstore.LsifStore // precise code-intel data store (e.g. upload documents)
	gitserverClient   GitserverClient     // commit graph and tag lookups against gitserver
	policySvc         PolicyService
	expirationMetrics *expirationMetrics
	resetterMetrics   *resetterMetrics
	janitorMetrics    *janitorMetrics
	workerMetrics     workerutil.WorkerMetrics
	policyMatcher     PolicyMatcher
	locker            Locker
	logger            logger.Logger
	operations        *operations // one observation operation per service method
	clock             glock.Clock // injectable clock; real clock in production (see newService)
}
// newService wires together the uploads service from its store, clients, and
// metric/observation collaborators. The parameters uploadsStore and lsifStore
// are deliberately not named after the store/lsifstore packages they come
// from, so the package identifiers are not shadowed inside the function.
func newService(
	uploadsStore store.Store,
	repoStore RepoStore,
	lsifStore lsifstore.LsifStore,
	gsc GitserverClient,
	policySvc PolicyService,
	policyMatcher PolicyMatcher,
	locker Locker,
	observationContext *observation.Context,
) *Service {
	workerutilStore := uploadsStore.WorkerutilStore(observationContext)

	// TODO - move this to metric reporter?
	dbworker.InitPrometheusMetric(observationContext, workerutilStore, "codeintel", "upload", nil)

	return &Service{
		store:             uploadsStore,
		repoStore:         repoStore,
		workerutilStore:   workerutilStore,
		lsifstore:         lsifStore,
		gitserverClient:   gsc,
		policySvc:         policySvc,
		expirationMetrics: newExpirationMetrics(observationContext),
		resetterMetrics:   newResetterMetrics(observationContext),
		janitorMetrics:    newJanitorMetrics(observationContext),
		workerMetrics:     workerutil.NewMetrics(observationContext, "codeintel_upload_processor"),
		policyMatcher:     policyMatcher,
		locker:            locker,
		logger:            observationContext.Logger,
		operations:        newOperations(observationContext),
		clock:             glock.NewRealClock(),
	}
}
// GetCommitsVisibleToUpload returns a page of commits visible to the given
// upload, delegating to the store; token/nextToken drive pagination.
func (s *Service) GetCommitsVisibleToUpload(ctx context.Context, uploadID, limit int, token *string) (_ []string, nextToken *string, err error) {
	ctx, _, endObservation := s.operations.getCommitsVisibleToUpload.With(ctx, &err, observation.Args{})
	defer endObservation(1, observation.Args{})

	commits, next, storeErr := s.store.GetCommitsVisibleToUpload(ctx, uploadID, limit, token)
	return commits, next, storeErr
}
// GetCommitGraphMetadata reports whether the repository's commit graph is
// stale and when it was last updated, as recorded in the store.
func (s *Service) GetCommitGraphMetadata(ctx context.Context, repositoryID int) (stale bool, updatedAt *time.Time, err error) {
	fields := []log.Field{log.Int("repositoryID", repositoryID)}
	ctx, _, endObservation := s.operations.getCommitGraphMetadata.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.GetCommitGraphMetadata(ctx, repositoryID)
}
// GetRepoName resolves a repository ID to its name via the store.
func (s *Service) GetRepoName(ctx context.Context, repositoryID int) (_ string, err error) {
	fields := []log.Field{log.Int("repositoryID", repositoryID)}
	ctx, _, endObservation := s.operations.getRepoName.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.RepoName(ctx, repositoryID)
}
// GetRepositoriesForIndexScan returns repository IDs due for an index scan,
// delegating entirely to the store. Note: repositoryMatchLimit is forwarded
// but not included in the log fields.
func (s *Service) GetRepositoriesForIndexScan(ctx context.Context, table, column string, processDelay time.Duration, allowGlobalPolicies bool, repositoryMatchLimit *int, limit int, now time.Time) (_ []int, err error) {
	args := observation.Args{
		LogFields: []log.Field{
			log.String("table", table),
			log.String("column", column),
			log.Int("processDelay in ms", int(processDelay.Milliseconds())),
			log.Bool("allowGlobalPolicies", allowGlobalPolicies),
			log.Int("limit", limit),
			log.String("now", now.String()),
		},
	}
	ctx, _, endObservation := s.operations.getRepositoriesForIndexScan.With(ctx, &err, args)
	defer endObservation(1, observation.Args{})

	return s.store.GetRepositoriesForIndexScan(ctx, table, column, processDelay, allowGlobalPolicies, repositoryMatchLimit, limit, now)
}
// GetDirtyRepositories returns the store's map of repositories whose commit
// graph is marked dirty, keyed by repository ID (value semantics are defined
// by the store).
//
// NOTE: Used by autoindexing (for some reason?)
func (s *Service) GetDirtyRepositories(ctx context.Context) (_ map[int]int, err error) {
	ctx, _, endObservation := s.operations.getDirtyRepositories.With(ctx, &err, observation.Args{})
	defer endObservation(1, observation.Args{})

	dirty, storeErr := s.store.GetDirtyRepositories(ctx)
	return dirty, storeErr
}
// GetUploads returns a page of upload records matching opts along with the
// total number of matching records.
func (s *Service) GetUploads(ctx context.Context, opts types.GetUploadsOptions) (uploads []types.Upload, totalCount int, err error) {
	fields := []log.Field{
		log.Int("repositoryID", opts.RepositoryID),
		log.String("state", opts.State),
		log.String("term", opts.Term),
	}
	ctx, _, endObservation := s.operations.getUploads.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.GetUploads(ctx, opts)
}
// GetUploadByID fetches a single upload record by ID; the record, a found
// flag, and any error come straight from the store.
//
// TODO: Not being used in the resolver layer
func (s *Service) GetUploadByID(ctx context.Context, id int) (_ types.Upload, _ bool, err error) {
	fields := []log.Field{log.Int("id", id)}
	ctx, _, endObservation := s.operations.getUploadByID.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.GetUploadByID(ctx, id)
}
// GetUploadsByIDs fetches the upload records for the given IDs from the store.
func (s *Service) GetUploadsByIDs(ctx context.Context, ids ...int) (_ []types.Upload, err error) {
	fields := []log.Field{log.String("ids", fmt.Sprintf("%v", ids))}
	ctx, _, endObservation := s.operations.getUploadsByIDs.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.GetUploadsByIDs(ctx, ids...)
}
// GetUploadIDsWithReferences returns upload IDs referencing the given
// monikers (excluding ignoreIDs), plus scan statistics, from the store.
//
// NOTE(review): this method records under the getVisibleUploadsMatchingMonikers
// operation rather than a same-named one — confirm that is intentional.
func (s *Service) GetUploadIDsWithReferences(ctx context.Context, orderedMonikers []precise.QualifiedMonikerData, ignoreIDs []int, repositoryID int, commit string, limit int, offset int) (ids []int, recordsScanned int, totalCount int, err error) {
	args := observation.Args{
		LogFields: []log.Field{
			log.Int("repositoryID", repositoryID),
			log.String("commit", commit),
			log.Int("limit", limit),
			log.Int("offset", offset),
			log.String("orderedMonikers", fmt.Sprintf("%v", orderedMonikers)),
			log.String("ignoreIDs", fmt.Sprintf("%v", ignoreIDs)),
		},
	}
	ctx, trace, endObservation := s.operations.getVisibleUploadsMatchingMonikers.With(ctx, &err, args)
	defer endObservation(1, observation.Args{})

	return s.store.GetUploadIDsWithReferences(ctx, orderedMonikers, ignoreIDs, repositoryID, commit, limit, offset, trace)
}
// DeleteUploadByID deletes the upload with the given ID; the boolean result
// is whatever the store reports for the deletion.
func (s *Service) DeleteUploadByID(ctx context.Context, id int) (_ bool, err error) {
	fields := []log.Field{log.Int("id", id)}
	ctx, _, endObservation := s.operations.deleteUploadByID.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.DeleteUploadByID(ctx, id)
}
// DeleteUploads deletes all uploads matching the given options via the store.
//
// NOTE(review): this records under the deleteUploadByID operation rather than
// a dedicated deleteUploads operation — likely a copy/paste slip that folds
// these calls into DeleteUploadByID's metrics. Fix once a deleteUploads
// operation exists in the operations struct.
func (s *Service) DeleteUploads(ctx context.Context, opts types.DeleteUploadsOptions) (err error) {
	ctx, _, endObservation := s.operations.deleteUploadByID.With(ctx, &err, observation.Args{})
	defer endObservation(1, observation.Args{})
	return s.store.DeleteUploads(ctx, opts)
}
// numAncestors is the number of ancestors to query from gitserver when trying to find the closest
// ancestor we have data for. Setting this value too low (relative to a repository's commit rate)
// will cause requests for an unknown commit to return too few results; setting this value too high
// will raise the latency of requests for an unknown commit.
//
// TODO(efritz) - make adjustable via site configuration
const numAncestors = 100
// InferClosestUploads will return the set of visible uploads for the given commit. If this commit is
// newer than our last refresh of the lsif_nearest_uploads table for this repository, then we will mark
// the repository as dirty and quickly approximate the correct set of visible uploads.
//
// Because updating the entire commit graph is a blocking, expensive, and lock-guarded process, we want
// to only do that in the background and do something cheap in latency-sensitive paths. To construct an
// approximate result, we query gitserver for a (relatively small) set of ancestors for the given commit,
// correlate that with the upload data we have for those commits, and re-run the visibility algorithm over
// the graph. This will not always produce the full set of visible commits - some responses may not contain
// all results while a subsequent request made after the lsif_nearest_uploads has been updated to include
// this commit will.
func (s *Service) InferClosestUploads(ctx context.Context, repositoryID int, commit, path string, exactPath bool, indexer string) (_ []types.Dump, err error) {
	ctx, _, endObservation := s.operations.inferClosestUploads.With(ctx, &err, observation.Args{
		LogFields: []log.Field{log.Int("repositoryID", repositoryID), log.String("commit", commit), log.String("path", path), log.Bool("exactPath", exactPath), log.String("indexer", indexer)},
	})
	defer endObservation(1, observation.Args{})

	// The parameters exactPath and rootMustEnclosePath align here: if we're looking for dumps
	// that can answer queries for a directory (e.g. diagnostics), we want any dump that happens
	// to intersect the target directory. If we're looking for dumps that can answer queries for
	// a single file, then we need a dump with a root that properly encloses that file.
	// Error-wrap messages below name the actual store/gitserver methods being called so log
	// output points at the right code (previously some said "dbstore.*" or named the wrong method).
	if dumps, err := s.store.FindClosestDumps(ctx, repositoryID, commit, path, exactPath, indexer); err != nil {
		return nil, errors.Wrap(err, "store.FindClosestDumps")
	} else if len(dumps) != 0 {
		return dumps, nil
	}

	// Repository has no LSIF data at all
	if repositoryExists, err := s.store.HasRepository(ctx, repositoryID); err != nil {
		return nil, errors.Wrap(err, "store.HasRepository")
	} else if !repositoryExists {
		return nil, nil
	}

	// Commit is known and the empty dumps list explicitly means nothing is visible
	if commitExists, err := s.store.HasCommit(ctx, repositoryID, commit); err != nil {
		return nil, errors.Wrap(err, "store.HasCommit")
	} else if commitExists {
		return nil, nil
	}

	// Otherwise, the repository has LSIF data but we don't know about the commit. This commit
	// is probably newer than our last upload. Pull back a portion of the updated commit graph
	// and try to link it with what we have in the database. Then mark the repository's commit
	// graph as dirty so it's updated for subsequent requests.
	graph, err := s.gitserverClient.CommitGraph(ctx, repositoryID, gitserver.CommitGraphOptions{
		Commit: commit,
		Limit:  numAncestors,
	})
	if err != nil {
		return nil, errors.Wrap(err, "gitserverClient.CommitGraph")
	}

	dumps, err := s.store.FindClosestDumpsFromGraphFragment(ctx, repositoryID, commit, path, exactPath, indexer, graph)
	if err != nil {
		return nil, errors.Wrap(err, "store.FindClosestDumpsFromGraphFragment")
	}

	if err := s.store.SetRepositoryAsDirty(ctx, repositoryID); err != nil {
		return nil, errors.Wrap(err, "store.SetRepositoryAsDirty")
	}

	return dumps, nil
}
// GetDumpsWithDefinitionsForMonikers returns the dumps that hold definitions
// for the given monikers, per the store.
func (s *Service) GetDumpsWithDefinitionsForMonikers(ctx context.Context, monikers []precise.QualifiedMonikerData) (_ []types.Dump, err error) {
	fields := []log.Field{log.String("monikers", fmt.Sprintf("%v", monikers))}
	ctx, _, endObservation := s.operations.getDumpsWithDefinitionsForMonikers.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.GetDumpsWithDefinitionsForMonikers(ctx, monikers)
}
// GetDumpsByIDs fetches the dump records for the given IDs from the store.
func (s *Service) GetDumpsByIDs(ctx context.Context, ids []int) (_ []types.Dump, err error) {
	fields := []log.Field{
		log.Int("total_ids", len(ids)),
		log.String("ids", fmt.Sprintf("%v", ids)),
	}
	ctx, _, endObservation := s.operations.getDumpsByIDs.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.GetDumpsByIDs(ctx, ids)
}
// ReferencesForUpload returns a scanner over the package references recorded
// for the given upload.
func (s *Service) ReferencesForUpload(ctx context.Context, uploadID int) (_ shared.PackageReferenceScanner, err error) {
	fields := []log.Field{log.Int("uploadID", uploadID)}
	ctx, _, endObservation := s.operations.referencesForUpload.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.ReferencesForUpload(ctx, uploadID)
}
// GetAuditLogsForUpload returns the audit-log entries recorded for the given
// upload.
func (s *Service) GetAuditLogsForUpload(ctx context.Context, uploadID int) (_ []types.UploadLog, err error) {
	fields := []log.Field{log.Int("uploadID", uploadID)}
	ctx, _, endObservation := s.operations.getAuditLogsForUpload.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.GetAuditLogsForUpload(ctx, uploadID)
}
// GetUploadDocumentsForPath returns document paths within the given bundle
// matching pathPattern, plus a count; note this delegates to the lsifstore,
// not the uploads store.
func (s *Service) GetUploadDocumentsForPath(ctx context.Context, bundleID int, pathPattern string) (_ []string, _ int, err error) {
	fields := []log.Field{
		log.Int("bundleID", bundleID),
		log.String("pathPattern", pathPattern),
	}
	ctx, _, endObservation := s.operations.getUploadDocumentsForPath.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.lsifstore.GetUploadDocumentsForPath(ctx, bundleID, pathPattern)
}
// GetRecentUploadsSummary returns the store's recent-uploads summary for the
// given repository, grouped by repository namespace.
//
// The first result is unnamed (it was previously named `upload` but never
// assigned), matching the convention of the sibling delegation methods.
func (s *Service) GetRecentUploadsSummary(ctx context.Context, repositoryID int) (_ []shared.UploadsWithRepositoryNamespace, err error) {
	ctx, _, endObservation := s.operations.getRecentUploadsSummary.With(ctx, &err, observation.Args{
		LogFields: []log.Field{log.Int("repositoryID", repositoryID)},
	})
	defer endObservation(1, observation.Args{})

	return s.store.GetRecentUploadsSummary(ctx, repositoryID)
}
// GetLastUploadRetentionScanForRepository returns the time of the last upload
// retention scan for the repository, or nil per the store's convention.
func (s *Service) GetLastUploadRetentionScanForRepository(ctx context.Context, repositoryID int) (_ *time.Time, err error) {
	fields := []log.Field{log.Int("repositoryID", repositoryID)}
	ctx, _, endObservation := s.operations.getLastUploadRetentionScanForRepository.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.store.GetLastUploadRetentionScanForRepository(ctx, repositoryID)
}
// GetListTags lists tags for the given repo and commit objects via gitserver.
// (The Get prefix is unidiomatic Go, but the name is kept for callers.)
func (s *Service) GetListTags(ctx context.Context, repo api.RepoName, commitObjs ...string) (_ []*gitdomain.Tag, err error) {
	fields := []log.Field{
		log.String("repo", string(repo)),
		log.String("commitObjs", fmt.Sprintf("%v", commitObjs)),
	}
	ctx, _, endObservation := s.operations.getListTags.With(ctx, &err, observation.Args{LogFields: fields})
	defer endObservation(1, observation.Args{})

	return s.gitserverClient.ListTags(ctx, repo, commitObjs...)
}