Skip to content

Commit fdff738

Browse files
author
Stephen Gutekanst
authored
backout API docs changes (sourcegraph#25715)
* Revert "API docs: codeintel: add OOB migration to index API docs for search (sourcegraph#25207)". This reverts commit b910fed.
* Revert "API docs: codeintel: begin indexing API docs for search (sourcegraph#25666)". This reverts commit 30f0c79.
1 parent b910fed commit fdff738

20 files changed

Lines changed: 47 additions & 732 deletions

File tree

doc/dev/background-information/codeintel/apidocs/index.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ On Sourcegraph.com, only a few thousand repos have Go LSIF data (as of Sept 15,
100100

101101
We make it easy to limit the amount of resources going to API docs search as a feature, since it is desirable to both prevent unbounded growth issues on e.g. Sourcegraph.com and prevent any unexpected resource consumption on enterprise instances (e.g. if someone out there has a Postgres instance provisioned well today, but has hundreds of thousands of Go repositories with LSIF indexing, adding this table may increase resource usage.)
102102

103-
In specific, a site configuration option `"apidocs.search.index-size-limit-factor": 1.0` enables limiting the index size. The value `1.0` is a multiple of 250 million symbols, i.e., `1.0` indicates 250 million symbols (approx 12.5k Go repos) can be in the public and private search indexes independently (500 million total), `2.0` indicates 500 million symbols (approx 50k Go repos), and so on.
103+
In specific, a site configuration option `"apidocs.search-index-limit-factor": 1.0` enables limiting the index size. The value `1.0` is a multiple of 250 million symbols, i.e., `1.0` indicates 250 million symbols (approx 12.5k Go repos) can be in the public and private search indexes independently (500 million total), `2.0` indicates 500 million symbols (approx 50k Go repos), and so on.
104104

105105
We implement this by merely requesting an estimate number of rows in the table:
106106

enterprise/cmd/frontend/internal/codeintel/config.go

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,6 @@ type Config struct {
2222
ReferencesCountMigrationBatchInterval time.Duration
2323
DocumentColumnSplitMigrationBatchSize int
2424
DocumentColumnSplitMigrationBatchInterval time.Duration
25-
APIDocsSearchMigrationBatchSize int
26-
APIDocsSearchMigrationBatchInterval time.Duration
2725
CommittedAtMigrationBatchSize int
2826
CommittedAtMigrationBatchInterval time.Duration
2927
ReferenceCountMigrationBatchSize int
@@ -50,8 +48,6 @@ func init() {
5048
config.ReferencesCountMigrationBatchInterval = config.GetInterval("PRECISE_CODE_INTEL_REFERENCES_COUNT_MIGRATION_BATCH_INTERVAL", "1s", "The timeout between processing migration batches.")
5149
config.DocumentColumnSplitMigrationBatchSize = config.GetInt("PRECISE_CODE_INTEL_DOCUMENT_COLUMN_SPLIT_MIGRATION_BATCH_SIZE", "100", "The maximum number of document records to migrate at a time.")
5250
config.DocumentColumnSplitMigrationBatchInterval = config.GetInterval("PRECISE_CODE_INTEL_DOCUMENT_COLUMN_SPLIT_MIGRATION_BATCH_INTERVAL", "1s", "The timeout between processing migration batches.")
53-
config.APIDocsSearchMigrationBatchSize = config.GetInt("PRECISE_CODE_INTEL_API_DOCS_SEARCH_MIGRATION_BATCH_SIZE", "1", "The maximum number of bundles to migrate at a time.")
54-
config.APIDocsSearchMigrationBatchInterval = config.GetInterval("PRECISE_CODE_INTEL_API_DOCS_SEARCH_MIGRATION_BATCH_INTERVAL", "1s", "The timeout between processing migration batches.")
5551
config.CommittedAtMigrationBatchSize = config.GetInt("PRECISE_CODE_INTEL_COMMITTED_AT_MIGRATION_BATCH_SIZE", "100", "The maximum number of upload records to migrate at a time.")
5652
config.CommittedAtMigrationBatchInterval = config.GetInterval("PRECISE_CODE_INTEL_COMMITTED_AT_MIGRATION_BATCH_INTERVAL", "1s", "The timeout between processing migration batches.")
5753
config.ReferenceCountMigrationBatchSize = config.GetInt("PRECISE_CODE_INTEL_REFERENCE_COUNT_MIGRATION_BATCH_SIZE", "100", "The maximum number of upload records to migrate at a time.")

enterprise/cmd/frontend/internal/codeintel/migrations.go

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -44,20 +44,6 @@ func registerMigrations(ctx context.Context, db dbutil.DB, outOfBandMigrationRun
4444
return err
4545
}
4646

47-
if err := outOfBandMigrationRunner.Register(
48-
lsifmigrations.APIDocsSearchMigrationID, // 12
49-
lsifmigrations.NewAPIDocsSearchMigrator(
50-
services.lsifStore,
51-
services.dbStore,
52-
services.repoStore,
53-
services.gitserverClient,
54-
config.APIDocsSearchMigrationBatchSize,
55-
),
56-
oobmigration.MigratorOptions{Interval: config.APIDocsSearchMigrationBatchInterval},
57-
); err != nil {
58-
return err
59-
}
60-
6147
if err := outOfBandMigrationRunner.Register(
6248
dbmigrations.CommittedAtMigrationID, // 8
6349
dbmigrations.NewCommittedAtMigrator(services.dbStore, services.gitserverClient, config.CommittedAtMigrationBatchSize),

enterprise/cmd/frontend/internal/codeintel/services.go

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@ import (
1717
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/lsifstore"
1818
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/uploadstore"
1919
"github.com/sourcegraph/sourcegraph/internal/conf"
20-
"github.com/sourcegraph/sourcegraph/internal/database"
2120
"github.com/sourcegraph/sourcegraph/internal/database/dbconn"
2221
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
2322
"github.com/sourcegraph/sourcegraph/internal/database/locker"
@@ -30,7 +29,6 @@ var services struct {
3029
dbStore *store.Store
3130
locker *locker.Locker
3231
lsifStore *lsifstore.Store
33-
repoStore *database.RepoStore
3432
uploadStore uploadstore.Store
3533
gitserverClient *gitserver.Client
3634
indexEnqueuer *enqueuer.IndexEnqueuer
@@ -74,7 +72,6 @@ func initServices(ctx context.Context, db dbutil.DB) error {
7472
services.dbStore = dbStore
7573
services.locker = locker
7674
services.lsifStore = lsifStore
77-
services.repoStore = database.ReposWith(dbStore.Store)
7875
services.uploadStore = uploadStore
7976
services.gitserverClient = gitserverClient
8077
services.indexEnqueuer = indexEnqueuer

enterprise/cmd/precise-code-intel-worker/internal/worker/handler.go

Lines changed: 17 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,11 @@ import (
1515
"github.com/keegancsmith/sqlf"
1616

1717
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
18-
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/dbstore"
1918
store "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/dbstore"
2019
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/uploadstore"
2120
"github.com/sourcegraph/sourcegraph/internal/api"
2221
"github.com/sourcegraph/sourcegraph/internal/honey"
2322
"github.com/sourcegraph/sourcegraph/internal/trace"
24-
"github.com/sourcegraph/sourcegraph/internal/types"
2523
"github.com/sourcegraph/sourcegraph/internal/vcs"
2624
"github.com/sourcegraph/sourcegraph/internal/workerutil"
2725
dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
@@ -89,21 +87,10 @@ func (h *handler) handle(ctx context.Context, upload store.Upload) (requeued boo
8987
}
9088
}()
9189

92-
repo, err := backend.Repos.Get(ctx, api.RepoID(upload.RepositoryID))
93-
if err != nil {
94-
return false, errors.Wrap(err, "Repos.Get")
95-
}
96-
97-
if requeued, err := requeueIfCloning(ctx, h.workerStore, upload, repo); err != nil || requeued {
90+
if requeued, err := requeueIfCloning(ctx, h.workerStore, upload); err != nil || requeued {
9891
return requeued, err
9992
}
10093

101-
// Determine if the upload is for the default Git branch.
102-
isDefaultBranch, err := h.gitserverClient.DefaultBranchContains(ctx, upload.RepositoryID, upload.Commit)
103-
if err != nil {
104-
return false, errors.Wrap(err, "gitserver.DefaultBranchContains")
105-
}
106-
10794
getChildren := func(ctx context.Context, dirnames []string) (map[string][]string, error) {
10895
directoryChildren, err := h.gitserverClient.DirectoryChildren(ctx, upload.RepositoryID, upload.Commit, dirnames)
10996
if err != nil {
@@ -120,7 +107,7 @@ func (h *handler) handle(ctx context.Context, upload store.Upload) (requeued boo
120107

121108
// Note: this is writing to a different database than the block below, so we need to use a
122109
// different transaction context (managed by the writeData function).
123-
if err := writeData(ctx, h.lsifStore, upload, repo, isDefaultBranch, groupedBundleData); err != nil {
110+
if err := writeData(ctx, h.lsifStore, upload.ID, groupedBundleData); err != nil {
124111
if isUniqueConstraintViolation(err) {
125112
// If this is a unique constraint violation, then we've previously processed this same
126113
// upload record up to this point, but failed to perform the transaction below. We can
@@ -213,7 +200,12 @@ const CloneInProgressDelay = time.Minute
213200
// if the repo has finished cloning and the revision does not exist, then the upload will fail to process.
214201
// If the repo is currently cloning, then we'll requeue the upload to be tried again later. This will not
215202
// increase the reset count of the record (so this doesn't count against the upload as a legitimate attempt).
216-
func requeueIfCloning(ctx context.Context, workerStore dbworkerstore.Store, upload store.Upload, repo *types.Repo) (requeued bool, _ error) {
203+
func requeueIfCloning(ctx context.Context, workerStore dbworkerstore.Store, upload store.Upload) (requeued bool, _ error) {
204+
repo, err := backend.Repos.Get(ctx, api.RepoID(upload.RepositoryID))
205+
if err != nil {
206+
return false, errors.Wrap(err, "Repos.Get")
207+
}
208+
217209
if _, err := backend.Repos.ResolveRev(ctx, repo, upload.Commit); err != nil {
218210
if !vcs.IsCloneInProgress(err) {
219211
return false, errors.Wrap(err, "Repos.ResolveRev")
@@ -260,35 +252,35 @@ func withUploadData(ctx context.Context, uploadStore uploadstore.Store, id int,
260252
}
261253

262254
// writeData transactionally writes the given grouped bundle data into the given LSIF store.
263-
func writeData(ctx context.Context, lsifStore LSIFStore, upload dbstore.Upload, repo *types.Repo, isDefaultBranch bool, groupedBundleData *precise.GroupedBundleDataChans) (err error) {
255+
func writeData(ctx context.Context, lsifStore LSIFStore, id int, groupedBundleData *precise.GroupedBundleDataChans) (err error) {
264256
tx, err := lsifStore.Transact(ctx)
265257
if err != nil {
266258
return err
267259
}
268260
defer func() { err = tx.Done(err) }()
269261

270-
if err := tx.WriteMeta(ctx, upload.ID, groupedBundleData.Meta); err != nil {
262+
if err := tx.WriteMeta(ctx, id, groupedBundleData.Meta); err != nil {
271263
return errors.Wrap(err, "store.WriteMeta")
272264
}
273-
if err := tx.WriteDocuments(ctx, upload.ID, groupedBundleData.Documents); err != nil {
265+
if err := tx.WriteDocuments(ctx, id, groupedBundleData.Documents); err != nil {
274266
return errors.Wrap(err, "store.WriteDocuments")
275267
}
276-
if err := tx.WriteResultChunks(ctx, upload.ID, groupedBundleData.ResultChunks); err != nil {
268+
if err := tx.WriteResultChunks(ctx, id, groupedBundleData.ResultChunks); err != nil {
277269
return errors.Wrap(err, "store.WriteResultChunks")
278270
}
279-
if err := tx.WriteDefinitions(ctx, upload.ID, groupedBundleData.Definitions); err != nil {
271+
if err := tx.WriteDefinitions(ctx, id, groupedBundleData.Definitions); err != nil {
280272
return errors.Wrap(err, "store.WriteDefinitions")
281273
}
282-
if err := tx.WriteReferences(ctx, upload.ID, groupedBundleData.References); err != nil {
274+
if err := tx.WriteReferences(ctx, id, groupedBundleData.References); err != nil {
283275
return errors.Wrap(err, "store.WriteReferences")
284276
}
285-
if err := tx.WriteDocumentationPages(ctx, upload, repo, isDefaultBranch, groupedBundleData.DocumentationPages); err != nil {
277+
if err := tx.WriteDocumentationPages(ctx, id, groupedBundleData.DocumentationPages); err != nil {
286278
return errors.Wrap(err, "store.WriteDocumentationPages")
287279
}
288-
if err := tx.WriteDocumentationPathInfo(ctx, upload.ID, groupedBundleData.DocumentationPathInfo); err != nil {
280+
if err := tx.WriteDocumentationPathInfo(ctx, id, groupedBundleData.DocumentationPathInfo); err != nil {
289281
return errors.Wrap(err, "store.WriteDocumentationPathInfo")
290282
}
291-
if err := tx.WriteDocumentationMappings(ctx, upload.ID, groupedBundleData.DocumentationMappings); err != nil {
283+
if err := tx.WriteDocumentationMappings(ctx, id, groupedBundleData.DocumentationMappings); err != nil {
292284
return errors.Wrap(err, "store.WriteDocumentationMappings")
293285
}
294286

enterprise/cmd/precise-code-intel-worker/internal/worker/iface.go

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@ import (
99
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/lsifstore"
1010
"github.com/sourcegraph/sourcegraph/internal/api"
1111
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
12-
"github.com/sourcegraph/sourcegraph/internal/types"
1312
"github.com/sourcegraph/sourcegraph/lib/codeintel/precise"
1413
)
1514

@@ -57,7 +56,7 @@ type LSIFStore interface {
5756
WriteResultChunks(ctx context.Context, bundleID int, resultChunks chan precise.IndexedResultChunkData) error
5857
WriteDefinitions(ctx context.Context, bundleID int, monikerLocations chan precise.MonikerLocations) error
5958
WriteReferences(ctx context.Context, bundleID int, monikerLocations chan precise.MonikerLocations) error
60-
WriteDocumentationPages(ctx context.Context, upload dbstore.Upload, repo *types.Repo, isDefaultBranch bool, documentation chan *precise.DocumentationPageData) error
59+
WriteDocumentationPages(ctx context.Context, bundleID int, documentation chan *precise.DocumentationPageData) error
6160
WriteDocumentationPathInfo(ctx context.Context, bundleID int, documentation chan *precise.DocumentationPathInfoData) error
6261
WriteDocumentationMappings(ctx context.Context, bundleID int, mappings chan precise.DocumentationMapping) error
6362
}
@@ -79,5 +78,4 @@ type GitserverClient interface {
7978
DirectoryChildren(ctx context.Context, repositoryID int, commit string, dirnames []string) (map[string][]string, error)
8079
CommitDate(ctx context.Context, repositoryID int, commit string) (time.Time, error)
8180
ResolveRevision(ctx context.Context, repositoryID int, versionString string) (api.CommitID, error)
82-
DefaultBranchContains(ctx context.Context, repositoryID int, commit string) (bool, error)
8381
}

0 commit comments

Comments (0)