@@ -663,6 +663,72 @@ describe('CodeWhisperer Server', () => {
            sinon.assert.calledOnceWithExactly(service.generateSuggestions, expectedGenerateSuggestionsRequest)
        })

+        it('should truncate left and right context in paginated requests', async () => {
+            // Reset the stub to handle multiple calls with different responses
+            service.generateSuggestions.reset()
+
+            // First request returns suggestions with a nextToken
+            service.generateSuggestions.onFirstCall().returns(
+                Promise.resolve({
+                    suggestions: EXPECTED_SUGGESTION,
+                    responseContext: { ...EXPECTED_RESPONSE_CONTEXT, nextToken: EXPECTED_NEXT_TOKEN },
+                })
+            )
+
+            // Second request (pagination) returns suggestions without nextToken
+            service.generateSuggestions.onSecondCall().returns(
+                Promise.resolve({
+                    suggestions: EXPECTED_SUGGESTION,
+                    responseContext: EXPECTED_RESPONSE_CONTEXT,
+                })
+            )
+
+            // Create a file with content that exceeds the context limit
+            const BIG_FILE_CONTENT = '123456789\n'.repeat(5000)
+            const BIG_FILE = TextDocument.create('file:///big_file.cs', 'csharp', 1, BIG_FILE_CONTENT)
+            const cutOffLine = 2000
+            features.openDocument(BIG_FILE)
+
+            // Make initial request
+            await features.doInlineCompletionWithReferences(
+                {
+                    textDocument: { uri: BIG_FILE.uri },
+                    position: { line: cutOffLine, character: 1 },
+                    context: { triggerKind: InlineCompletionTriggerKind.Invoked },
+                },
+                CancellationToken.None
+            )
+
+            // Make paginated request with the token from the first response
+            await features.doInlineCompletionWithReferences(
+                {
+                    textDocument: { uri: BIG_FILE.uri },
+                    position: { line: cutOffLine, character: 1 },
+                    context: { triggerKind: InlineCompletionTriggerKind.Invoked },
+                    partialResultToken: EXPECTED_NEXT_TOKEN,
+                },
+                CancellationToken.None
+            )
+
+            // Verify both calls were made
+            assert.strictEqual(service.generateSuggestions.callCount, 2)
+
+            // Get the actual arguments from both calls
+            const firstCallArgs = service.generateSuggestions.firstCall.args[0]
+            const secondCallArgs = service.generateSuggestions.secondCall.args[0]
+
+            // Verify context truncation in first call
+            assert.strictEqual(firstCallArgs.fileContext.leftFileContent.length, CONTEXT_CHARACTERS_LIMIT)
+            assert.strictEqual(firstCallArgs.fileContext.rightFileContent.length, CONTEXT_CHARACTERS_LIMIT)
+
+            // Verify context truncation in second call (pagination)
+            assert.strictEqual(secondCallArgs.fileContext.leftFileContent.length, CONTEXT_CHARACTERS_LIMIT)
+            assert.strictEqual(secondCallArgs.fileContext.rightFileContent.length, CONTEXT_CHARACTERS_LIMIT)
+
+            // Verify second call included the nextToken
+            assert.strictEqual(secondCallArgs.nextToken, EXPECTED_NEXT_TOKEN)
+        })
+
        it('throws ResponseError with expected message if connection is expired', async () => {
            service.generateSuggestions.returns(Promise.reject(new Error(INVALID_TOKEN)))