Initial Sample.
This commit is contained in:
1
graphql-subscription/node_modules/.bin/loose-envify
generated
vendored
Symbolic link
1
graphql-subscription/node_modules/.bin/loose-envify
generated
vendored
Symbolic link
@@ -0,0 +1 @@
|
||||
../loose-envify/cli.js
|
||||
297
graphql-subscription/node_modules/.package-lock.json
generated
vendored
Normal file
297
graphql-subscription/node_modules/.package-lock.json
generated
vendored
Normal file
@@ -0,0 +1,297 @@
|
||||
{
|
||||
"name": "graphql-example",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"node_modules/@apollo/client": {
|
||||
"version": "3.9.5",
|
||||
"resolved": "https://registry.npmjs.org/@apollo/client/-/client-3.9.5.tgz",
|
||||
"integrity": "sha512-7y+c8MTPU+hhTwvcGVtMMGIgWduzrvG1mz5yJMRyqYbheBkkky3Lki6ADWVSBXG1lZoOtPYvB2zDgVfKb2HSsw==",
|
||||
"dependencies": {
|
||||
"@graphql-typed-document-node/core": "^3.1.1",
|
||||
"@wry/caches": "^1.0.0",
|
||||
"@wry/equality": "^0.5.6",
|
||||
"@wry/trie": "^0.5.0",
|
||||
"graphql-tag": "^2.12.6",
|
||||
"hoist-non-react-statics": "^3.3.2",
|
||||
"optimism": "^0.18.0",
|
||||
"prop-types": "^15.7.2",
|
||||
"rehackt": "0.0.5",
|
||||
"response-iterator": "^0.2.6",
|
||||
"symbol-observable": "^4.0.0",
|
||||
"ts-invariant": "^0.10.3",
|
||||
"tslib": "^2.3.0",
|
||||
"zen-observable-ts": "^1.2.5"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"graphql": "^15.0.0 || ^16.0.0",
|
||||
"graphql-ws": "^5.5.5",
|
||||
"react": "^16.8.0 || ^17.0.0 || ^18.0.0",
|
||||
"react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0",
|
||||
"subscriptions-transport-ws": "^0.9.0 || ^0.11.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"graphql-ws": {
|
||||
"optional": true
|
||||
},
|
||||
"react": {
|
||||
"optional": true
|
||||
},
|
||||
"react-dom": {
|
||||
"optional": true
|
||||
},
|
||||
"subscriptions-transport-ws": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@graphql-typed-document-node/core": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@graphql-typed-document-node/core/-/core-3.2.0.tgz",
|
||||
"integrity": "sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ==",
|
||||
"peerDependencies": {
|
||||
"graphql": "^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@wry/caches": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@wry/caches/-/caches-1.0.1.tgz",
|
||||
"integrity": "sha512-bXuaUNLVVkD20wcGBWRyo7j9N3TxePEWFZj2Y+r9OoUzfqmavM84+mFykRicNsBqatba5JLay1t48wxaXaWnlA==",
|
||||
"dependencies": {
|
||||
"tslib": "^2.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/@wry/context": {
|
||||
"version": "0.7.4",
|
||||
"resolved": "https://registry.npmjs.org/@wry/context/-/context-0.7.4.tgz",
|
||||
"integrity": "sha512-jmT7Sb4ZQWI5iyu3lobQxICu2nC/vbUhP0vIdd6tHC9PTfenmRmuIFqktc6GH9cgi+ZHnsLWPvfSvc4DrYmKiQ==",
|
||||
"dependencies": {
|
||||
"tslib": "^2.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/@wry/equality": {
|
||||
"version": "0.5.7",
|
||||
"resolved": "https://registry.npmjs.org/@wry/equality/-/equality-0.5.7.tgz",
|
||||
"integrity": "sha512-BRFORjsTuQv5gxcXsuDXx6oGRhuVsEGwZy6LOzRRfgu+eSfxbhUQ9L9YtSEIuIjY/o7g3iWFjrc5eSY1GXP2Dw==",
|
||||
"dependencies": {
|
||||
"tslib": "^2.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/@wry/trie": {
|
||||
"version": "0.5.0",
|
||||
"resolved": "https://registry.npmjs.org/@wry/trie/-/trie-0.5.0.tgz",
|
||||
"integrity": "sha512-FNoYzHawTMk/6KMQoEG5O4PuioX19UbwdQKF44yw0nLfOypfQdjtfZzo/UIJWAJ23sNIFbD1Ug9lbaDGMwbqQA==",
|
||||
"dependencies": {
|
||||
"tslib": "^2.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/graphql": {
|
||||
"version": "16.8.1",
|
||||
"resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.1.tgz",
|
||||
"integrity": "sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==",
|
||||
"peer": true,
|
||||
"engines": {
|
||||
"node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/graphql-tag": {
|
||||
"version": "2.12.6",
|
||||
"resolved": "https://registry.npmjs.org/graphql-tag/-/graphql-tag-2.12.6.tgz",
|
||||
"integrity": "sha512-FdSNcu2QQcWnM2VNvSCCDCVS5PpPqpzgFT8+GXzqJuoDd0CBncxCY278u4mhRO7tMgo2JjgJA5aZ+nWSQ/Z+xg==",
|
||||
"dependencies": {
|
||||
"tslib": "^2.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"graphql": "^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/graphql-ws": {
|
||||
"version": "5.15.0",
|
||||
"resolved": "https://registry.npmjs.org/graphql-ws/-/graphql-ws-5.15.0.tgz",
|
||||
"integrity": "sha512-xWGAtm3fig9TIhSaNsg0FaDZ8Pyn/3re3RFlP4rhQcmjRDIPpk1EhRuNB+YSJtLzttyuToaDiNhwT1OMoGnJnw==",
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"graphql": ">=0.11 <=16"
|
||||
}
|
||||
},
|
||||
"node_modules/hoist-non-react-statics": {
|
||||
"version": "3.3.2",
|
||||
"resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
|
||||
"integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==",
|
||||
"dependencies": {
|
||||
"react-is": "^16.7.0"
|
||||
}
|
||||
},
|
||||
"node_modules/js-tokens": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
|
||||
"integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
|
||||
},
|
||||
"node_modules/loose-envify": {
|
||||
"version": "1.4.0",
|
||||
"resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
|
||||
"integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
|
||||
"dependencies": {
|
||||
"js-tokens": "^3.0.0 || ^4.0.0"
|
||||
},
|
||||
"bin": {
|
||||
"loose-envify": "cli.js"
|
||||
}
|
||||
},
|
||||
"node_modules/object-assign": {
|
||||
"version": "4.1.1",
|
||||
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
|
||||
"integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/optimism": {
|
||||
"version": "0.18.0",
|
||||
"resolved": "https://registry.npmjs.org/optimism/-/optimism-0.18.0.tgz",
|
||||
"integrity": "sha512-tGn8+REwLRNFnb9WmcY5IfpOqeX2kpaYJ1s6Ae3mn12AeydLkR3j+jSCmVQFoXqU8D41PAJ1RG1rCRNWmNZVmQ==",
|
||||
"dependencies": {
|
||||
"@wry/caches": "^1.0.0",
|
||||
"@wry/context": "^0.7.0",
|
||||
"@wry/trie": "^0.4.3",
|
||||
"tslib": "^2.3.0"
|
||||
}
|
||||
},
|
||||
"node_modules/optimism/node_modules/@wry/trie": {
|
||||
"version": "0.4.3",
|
||||
"resolved": "https://registry.npmjs.org/@wry/trie/-/trie-0.4.3.tgz",
|
||||
"integrity": "sha512-I6bHwH0fSf6RqQcnnXLJKhkSXG45MFral3GxPaY4uAl0LYDZM+YDVDAiU9bYwjTuysy1S0IeecWtmq1SZA3M1w==",
|
||||
"dependencies": {
|
||||
"tslib": "^2.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/prop-types": {
|
||||
"version": "15.8.1",
|
||||
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
|
||||
"integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
|
||||
"dependencies": {
|
||||
"loose-envify": "^1.4.0",
|
||||
"object-assign": "^4.1.1",
|
||||
"react-is": "^16.13.1"
|
||||
}
|
||||
},
|
||||
"node_modules/react": {
|
||||
"version": "18.2.0",
|
||||
"resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz",
|
||||
"integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==",
|
||||
"dependencies": {
|
||||
"loose-envify": "^1.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/react-is": {
|
||||
"version": "16.13.1",
|
||||
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
|
||||
"integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
|
||||
},
|
||||
"node_modules/rehackt": {
|
||||
"version": "0.0.5",
|
||||
"resolved": "https://registry.npmjs.org/rehackt/-/rehackt-0.0.5.tgz",
|
||||
"integrity": "sha512-BI1rV+miEkaHj8zd2n+gaMgzu/fKz7BGlb4zZ6HAiY9adDmJMkaDcmuXlJFv0eyKUob+oszs3/2gdnXUrzx2Tg==",
|
||||
"peerDependencies": {
|
||||
"@types/react": "*",
|
||||
"react": "*"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@types/react": {
|
||||
"optional": true
|
||||
},
|
||||
"react": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/response-iterator": {
|
||||
"version": "0.2.6",
|
||||
"resolved": "https://registry.npmjs.org/response-iterator/-/response-iterator-0.2.6.tgz",
|
||||
"integrity": "sha512-pVzEEzrsg23Sh053rmDUvLSkGXluZio0qu8VT6ukrYuvtjVfCbDZH9d6PGXb8HZfzdNZt8feXv/jvUzlhRgLnw==",
|
||||
"engines": {
|
||||
"node": ">=0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/symbol-observable": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/symbol-observable/-/symbol-observable-4.0.0.tgz",
|
||||
"integrity": "sha512-b19dMThMV4HVFynSAM1++gBHAbk2Tc/osgLIBZMKsyqh34jb2e8Os7T6ZW/Bt3pJFdBTd2JwAnAAEQV7rSNvcQ==",
|
||||
"engines": {
|
||||
"node": ">=0.10"
|
||||
}
|
||||
},
|
||||
"node_modules/ts-invariant": {
|
||||
"version": "0.10.3",
|
||||
"resolved": "https://registry.npmjs.org/ts-invariant/-/ts-invariant-0.10.3.tgz",
|
||||
"integrity": "sha512-uivwYcQaxAucv1CzRp2n/QdYPo4ILf9VXgH19zEIjFx2EJufV16P0JtJVpYHy89DItG6Kwj2oIUjrcK5au+4tQ==",
|
||||
"dependencies": {
|
||||
"tslib": "^2.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/tslib": {
|
||||
"version": "2.6.2",
|
||||
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz",
|
||||
"integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q=="
|
||||
},
|
||||
"node_modules/ws": {
|
||||
"version": "8.16.0",
|
||||
"resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz",
|
||||
"integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==",
|
||||
"engines": {
|
||||
"node": ">=10.0.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"bufferutil": "^4.0.1",
|
||||
"utf-8-validate": ">=5.0.2"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"bufferutil": {
|
||||
"optional": true
|
||||
},
|
||||
"utf-8-validate": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/zen-observable": {
|
||||
"version": "0.8.15",
|
||||
"resolved": "https://registry.npmjs.org/zen-observable/-/zen-observable-0.8.15.tgz",
|
||||
"integrity": "sha512-PQ2PC7R9rslx84ndNBZB/Dkv8V8fZEpk83RLgXtYd0fwUgEjseMn1Dgajh2x6S8QbZAFa9p2qVCEuYZNgve0dQ=="
|
||||
},
|
||||
"node_modules/zen-observable-ts": {
|
||||
"version": "1.2.5",
|
||||
"resolved": "https://registry.npmjs.org/zen-observable-ts/-/zen-observable-ts-1.2.5.tgz",
|
||||
"integrity": "sha512-QZWQekv6iB72Naeake9hS1KxHlotfRpe+WGNbNx5/ta+R3DNjVO2bswf63gXlWDcs+EMd7XY8HfVQyP1X6T4Zg==",
|
||||
"dependencies": {
|
||||
"zen-observable": "0.8.15"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
8
graphql-subscription/node_modules/@apollo/client/.changeset/README.md
generated
vendored
Normal file
8
graphql-subscription/node_modules/@apollo/client/.changeset/README.md
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# Changesets
|
||||
|
||||
Hello and welcome! This folder has been automatically generated by `@changesets/cli`, a build tool that works
|
||||
with multi-package repos, or single-package repos to help you version and publish your code. You can
|
||||
find the full documentation for it [in our repository](https://github.com/changesets/changesets)
|
||||
|
||||
We have a quick list of common questions to get you started engaging with this project in
|
||||
[our documentation](https://github.com/changesets/changesets/blob/main/docs/common-questions.md)
|
||||
14
graphql-subscription/node_modules/@apollo/client/.changeset/config.json
generated
vendored
Normal file
14
graphql-subscription/node_modules/@apollo/client/.changeset/config.json
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"$schema": "https://unpkg.com/@changesets/config@2.2.0/schema.json",
|
||||
"changelog": [
|
||||
"@changesets/changelog-github",
|
||||
{ "repo": "apollographql/apollo-client" }
|
||||
],
|
||||
"commit": false,
|
||||
"fixed": [],
|
||||
"linked": [],
|
||||
"access": "public",
|
||||
"baseBranch": "main",
|
||||
"updateInternalDependencies": "patch",
|
||||
"ignore": []
|
||||
}
|
||||
3734
graphql-subscription/node_modules/@apollo/client/CHANGELOG.md
generated
vendored
Normal file
3734
graphql-subscription/node_modules/@apollo/client/CHANGELOG.md
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
22
graphql-subscription/node_modules/@apollo/client/LICENSE
generated
vendored
Normal file
22
graphql-subscription/node_modules/@apollo/client/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2022 Apollo Graph, Inc. (Formerly Meteor Development Group, Inc.)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
51
graphql-subscription/node_modules/@apollo/client/README.md
generated
vendored
Normal file
51
graphql-subscription/node_modules/@apollo/client/README.md
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
<div align="center">
|
||||
|
||||
<p>
|
||||
<a href="https://www.apollographql.com/"><img src="https://raw.githubusercontent.com/apollographql/apollo-client-devtools/main/assets/apollo-wordmark.svg" height="100" alt="Apollo Client"></a>
|
||||
</p>
|
||||
<h1>Apollo Client</h1>
|
||||
|
||||
[](https://badge.fury.io/js/%40apollo%2Fclient) [](https://circleci.com/gh/apollographql/apollo-client) [](https://community.apollographql.com) [](https://discord.gg/graphos)
|
||||
|
||||
</div>
|
||||
|
||||
Apollo Client is a fully-featured caching GraphQL client with integrations for React, Angular, and more. It allows you to easily build UI components that fetch data via GraphQL.
|
||||
|
||||
| ☑️ Apollo Client User Survey |
|
||||
| :----- |
|
||||
| What do you like best about Apollo Client? What needs to be improved? Please tell us by taking a [one-minute survey](https://docs.google.com/forms/d/e/1FAIpQLSczNDXfJne3ZUOXjk9Ursm9JYvhTh1_nFTDfdq3XBAFWCzplQ/viewform?usp=pp_url&entry.1170701325=Apollo+Client&entry.204965213=Readme). Your responses will help us understand Apollo Client usage and allow us to serve you better. |
|
||||
|
||||
## Documentation
|
||||
|
||||
All Apollo Client documentation, including React integration articles and helpful recipes, can be found at: <br/>
|
||||
[https://www.apollographql.com/docs/react/](https://www.apollographql.com/docs/react/)
|
||||
|
||||
The Apollo Client API reference can be found at: <br/>
|
||||
[https://www.apollographql.com/docs/react/api/apollo-client/](https://www.apollographql.com/docs/react/api/apollo-client/)
|
||||
|
||||
Learn how to use Apollo Client with self-paced hands-on training on Odyssey, Apollo's official learning platform: <br/>
|
||||
[https://odyssey.apollographql.com/](https://odyssey.apollographql.com/)
|
||||
|
||||
## Maintainers
|
||||
|
||||
|Name|Username|
|
||||
|---|---|
|
||||
|Ben Newman|[@benjamn](https://github.com/benjamn)|
|
||||
|Alessia Bellisario|[@alessbell](https://github.com/alessbell)|
|
||||
|Jeff Auriemma|[@bignimbus](https://github.com/bignimbus)|
|
||||
|Hugh Willson|[@hwillson](https://github.com/hwillson)|
|
||||
|Jerel Miller|[@jerelmiller](https://github.com/jerelmiller)|
|
||||
|Lenz Weber-Tronic|[@phryneas](https://github.com/phryneas)|
|
||||
|
||||
## Who is Apollo?
|
||||
|
||||
[Apollo](https://apollographql.com/) builds open-source software and a graph platform to unify GraphQL across your apps and services. We help you ship faster with:
|
||||
|
||||
- [Apollo Studio](https://www.apollographql.com/studio/develop/) – A free, end-to-end platform for managing your GraphQL lifecycle. Track your GraphQL schemas in a hosted registry to create a source of truth for everything in your graph. Studio provides an IDE (Apollo Explorer) so you can explore data, collaborate on queries, observe usage, and safely make schema changes.
|
||||
- [Apollo Federation](https://www.apollographql.com/apollo-federation) – The industry-standard open architecture for building a distributed graph. Use Apollo’s gateway to compose a unified graph from multiple subgraphs, determine a query plan, and route requests across your services.
|
||||
- [Apollo Client](https://www.apollographql.com/apollo-client/) – The most popular GraphQL client for the web. Apollo also builds and maintains [Apollo iOS](https://github.com/apollographql/apollo-ios) and [Apollo Kotlin](https://github.com/apollographql/apollo-kotlin).
|
||||
- [Apollo Server](https://www.apollographql.com/docs/apollo-server/) – A production-ready JavaScript GraphQL server that connects to any microservice, API, or database. Compatible with all popular JavaScript frameworks and deployable in serverless environments.
|
||||
|
||||
## Learn how to build with Apollo
|
||||
|
||||
Check out the [Odyssey](https://odyssey.apollographql.com/) learning platform, the perfect place to start your GraphQL journey with videos and interactive code challenges. Join the [Apollo Community](https://community.apollographql.com/) to interact with and get technical help from the GraphQL community.
|
||||
8661
graphql-subscription/node_modules/@apollo/client/apollo-client.cjs
generated
vendored
Normal file
8661
graphql-subscription/node_modules/@apollo/client/apollo-client.cjs
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1
graphql-subscription/node_modules/@apollo/client/apollo-client.cjs.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/apollo-client.cjs.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
graphql-subscription/node_modules/@apollo/client/apollo-client.min.cjs
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/apollo-client.min.cjs
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
2567
graphql-subscription/node_modules/@apollo/client/cache/cache.cjs
generated
vendored
Normal file
2567
graphql-subscription/node_modules/@apollo/client/cache/cache.cjs
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1
graphql-subscription/node_modules/@apollo/client/cache/cache.cjs.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/cache.cjs.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
2567
graphql-subscription/node_modules/@apollo/client/cache/cache.cjs.native.js
generated
vendored
Normal file
2567
graphql-subscription/node_modules/@apollo/client/cache/cache.cjs.native.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
52
graphql-subscription/node_modules/@apollo/client/cache/core/cache.d.ts
generated
vendored
Normal file
52
graphql-subscription/node_modules/@apollo/client/cache/core/cache.d.ts
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
import type { DocumentNode } from "graphql";
|
||||
import type { StoreObject, Reference } from "../../utilities/index.js";
|
||||
import type { DataProxy } from "./types/DataProxy.js";
|
||||
import type { Cache } from "./types/Cache.js";
|
||||
import { getApolloCacheMemoryInternals } from "../../utilities/caching/getMemoryInternals.js";
|
||||
export type Transaction<T> = (c: ApolloCache<T>) => void;
|
||||
export declare abstract class ApolloCache<TSerialized> implements DataProxy {
|
||||
readonly assumeImmutableResults: boolean;
|
||||
abstract read<TData = any, TVariables = any>(query: Cache.ReadOptions<TVariables, TData>): TData | null;
|
||||
abstract write<TData = any, TVariables = any>(write: Cache.WriteOptions<TData, TVariables>): Reference | undefined;
|
||||
abstract diff<T>(query: Cache.DiffOptions): Cache.DiffResult<T>;
|
||||
abstract watch<TData = any, TVariables = any>(watch: Cache.WatchOptions<TData, TVariables>): () => void;
|
||||
abstract reset(options?: Cache.ResetOptions): Promise<void>;
|
||||
abstract evict(options: Cache.EvictOptions): boolean;
|
||||
/**
|
||||
* Replaces existing state in the cache (if any) with the values expressed by
|
||||
* `serializedState`.
|
||||
*
|
||||
* Called when hydrating a cache (server side rendering, or offline storage),
|
||||
* and also (potentially) during hot reloads.
|
||||
*/
|
||||
abstract restore(serializedState: TSerialized): ApolloCache<TSerialized>;
|
||||
/**
|
||||
* Exposes the cache's complete state, in a serializable format for later restoration.
|
||||
*/
|
||||
abstract extract(optimistic?: boolean): TSerialized;
|
||||
abstract removeOptimistic(id: string): void;
|
||||
batch<U>(options: Cache.BatchOptions<this, U>): U;
|
||||
abstract performTransaction(transaction: Transaction<TSerialized>, optimisticId?: string | null): void;
|
||||
recordOptimisticTransaction(transaction: Transaction<TSerialized>, optimisticId: string): void;
|
||||
transformDocument(document: DocumentNode): DocumentNode;
|
||||
transformForLink(document: DocumentNode): DocumentNode;
|
||||
identify(object: StoreObject | Reference): string | undefined;
|
||||
gc(): string[];
|
||||
modify<Entity extends Record<string, any> = Record<string, any>>(options: Cache.ModifyOptions<Entity>): boolean;
|
||||
readQuery<QueryType, TVariables = any>(options: Cache.ReadQueryOptions<QueryType, TVariables>, optimistic?: boolean): QueryType | null;
|
||||
private getFragmentDoc;
|
||||
readFragment<FragmentType, TVariables = any>(options: Cache.ReadFragmentOptions<FragmentType, TVariables>, optimistic?: boolean): FragmentType | null;
|
||||
writeQuery<TData = any, TVariables = any>({ id, data, ...options }: Cache.WriteQueryOptions<TData, TVariables>): Reference | undefined;
|
||||
writeFragment<TData = any, TVariables = any>({ id, data, fragment, fragmentName, ...options }: Cache.WriteFragmentOptions<TData, TVariables>): Reference | undefined;
|
||||
updateQuery<TData = any, TVariables = any>(options: Cache.UpdateQueryOptions<TData, TVariables>, update: (data: TData | null) => TData | null | void): TData | null;
|
||||
updateFragment<TData = any, TVariables = any>(options: Cache.UpdateFragmentOptions<TData, TVariables>, update: (data: TData | null) => TData | null | void): TData | null;
|
||||
/**
|
||||
* @experimental
|
||||
* @internal
|
||||
* This is not a stable API - it is used in development builds to expose
|
||||
* information to the DevTools.
|
||||
* Use at your own risk!
|
||||
*/
|
||||
getMemoryInternals?: typeof getApolloCacheMemoryInternals;
|
||||
}
|
||||
//# sourceMappingURL=cache.d.ts.map
|
||||
109
graphql-subscription/node_modules/@apollo/client/cache/core/cache.js
generated
vendored
Normal file
109
graphql-subscription/node_modules/@apollo/client/cache/core/cache.js
generated
vendored
Normal file
@@ -0,0 +1,109 @@
|
||||
import { __assign, __rest } from "tslib";
|
||||
import { wrap } from "optimism";
|
||||
import { cacheSizes, getFragmentQueryDocument, } from "../../utilities/index.js";
|
||||
import { WeakCache } from "@wry/caches";
|
||||
import { getApolloCacheMemoryInternals } from "../../utilities/caching/getMemoryInternals.js";
|
||||
var ApolloCache = /** @class */ (function () {
|
||||
function ApolloCache() {
|
||||
this.assumeImmutableResults = false;
|
||||
// Make sure we compute the same (===) fragment query document every
|
||||
// time we receive the same fragment in readFragment.
|
||||
this.getFragmentDoc = wrap(getFragmentQueryDocument, {
|
||||
max: cacheSizes["cache.fragmentQueryDocuments"] ||
|
||||
1000 /* defaultCacheSizes["cache.fragmentQueryDocuments"] */,
|
||||
cache: WeakCache,
|
||||
});
|
||||
}
|
||||
// Transactional API
|
||||
// The batch method is intended to replace/subsume both performTransaction
|
||||
// and recordOptimisticTransaction, but performTransaction came first, so we
|
||||
// provide a default batch implementation that's just another way of calling
|
||||
// performTransaction. Subclasses of ApolloCache (such as InMemoryCache) can
|
||||
// override the batch method to do more interesting things with its options.
|
||||
ApolloCache.prototype.batch = function (options) {
|
||||
var _this = this;
|
||||
var optimisticId = typeof options.optimistic === "string" ? options.optimistic
|
||||
: options.optimistic === false ? null
|
||||
: void 0;
|
||||
var updateResult;
|
||||
this.performTransaction(function () { return (updateResult = options.update(_this)); }, optimisticId);
|
||||
return updateResult;
|
||||
};
|
||||
ApolloCache.prototype.recordOptimisticTransaction = function (transaction, optimisticId) {
|
||||
this.performTransaction(transaction, optimisticId);
|
||||
};
|
||||
// Optional API
|
||||
// Called once per input document, allowing the cache to make static changes
|
||||
// to the query, such as adding __typename fields.
|
||||
ApolloCache.prototype.transformDocument = function (document) {
|
||||
return document;
|
||||
};
|
||||
// Called before each ApolloLink request, allowing the cache to make dynamic
|
||||
// changes to the query, such as filling in missing fragment definitions.
|
||||
ApolloCache.prototype.transformForLink = function (document) {
|
||||
return document;
|
||||
};
|
||||
ApolloCache.prototype.identify = function (object) {
|
||||
return;
|
||||
};
|
||||
ApolloCache.prototype.gc = function () {
|
||||
return [];
|
||||
};
|
||||
ApolloCache.prototype.modify = function (options) {
|
||||
return false;
|
||||
};
|
||||
// DataProxy API
|
||||
ApolloCache.prototype.readQuery = function (options, optimistic) {
|
||||
if (optimistic === void 0) { optimistic = !!options.optimistic; }
|
||||
return this.read(__assign(__assign({}, options), { rootId: options.id || "ROOT_QUERY", optimistic: optimistic }));
|
||||
};
|
||||
ApolloCache.prototype.readFragment = function (options, optimistic) {
|
||||
if (optimistic === void 0) { optimistic = !!options.optimistic; }
|
||||
return this.read(__assign(__assign({}, options), { query: this.getFragmentDoc(options.fragment, options.fragmentName), rootId: options.id, optimistic: optimistic }));
|
||||
};
|
||||
ApolloCache.prototype.writeQuery = function (_a) {
|
||||
var id = _a.id, data = _a.data, options = __rest(_a, ["id", "data"]);
|
||||
return this.write(Object.assign(options, {
|
||||
dataId: id || "ROOT_QUERY",
|
||||
result: data,
|
||||
}));
|
||||
};
|
||||
ApolloCache.prototype.writeFragment = function (_a) {
|
||||
var id = _a.id, data = _a.data, fragment = _a.fragment, fragmentName = _a.fragmentName, options = __rest(_a, ["id", "data", "fragment", "fragmentName"]);
|
||||
return this.write(Object.assign(options, {
|
||||
query: this.getFragmentDoc(fragment, fragmentName),
|
||||
dataId: id,
|
||||
result: data,
|
||||
}));
|
||||
};
|
||||
ApolloCache.prototype.updateQuery = function (options, update) {
|
||||
return this.batch({
|
||||
update: function (cache) {
|
||||
var value = cache.readQuery(options);
|
||||
var data = update(value);
|
||||
if (data === void 0 || data === null)
|
||||
return value;
|
||||
cache.writeQuery(__assign(__assign({}, options), { data: data }));
|
||||
return data;
|
||||
},
|
||||
});
|
||||
};
|
||||
ApolloCache.prototype.updateFragment = function (options, update) {
|
||||
return this.batch({
|
||||
update: function (cache) {
|
||||
var value = cache.readFragment(options);
|
||||
var data = update(value);
|
||||
if (data === void 0 || data === null)
|
||||
return value;
|
||||
cache.writeFragment(__assign(__assign({}, options), { data: data }));
|
||||
return data;
|
||||
},
|
||||
});
|
||||
};
|
||||
return ApolloCache;
|
||||
}());
|
||||
export { ApolloCache };
|
||||
if (globalThis.__DEV__ !== false) {
|
||||
ApolloCache.prototype.getMemoryInternals = getApolloCacheMemoryInternals;
|
||||
}
|
||||
//# sourceMappingURL=cache.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/core/cache.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/core/cache.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
62
graphql-subscription/node_modules/@apollo/client/cache/core/types/Cache.d.ts
generated
vendored
Normal file
62
graphql-subscription/node_modules/@apollo/client/cache/core/types/Cache.d.ts
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
import { DataProxy } from "./DataProxy.js";
|
||||
import type { AllFieldsModifier, Modifiers } from "./common.js";
|
||||
import type { ApolloCache } from "../cache.js";
|
||||
export declare namespace Cache {
|
||||
type WatchCallback<TData = any> = (diff: Cache.DiffResult<TData>, lastDiff?: Cache.DiffResult<TData>) => void;
|
||||
interface ReadOptions<TVariables = any, TData = any> extends DataProxy.Query<TVariables, TData> {
|
||||
rootId?: string;
|
||||
previousResult?: any;
|
||||
optimistic: boolean;
|
||||
returnPartialData?: boolean;
|
||||
/**
|
||||
* @deprecated
|
||||
* Using `canonizeResults` can result in memory leaks so we generally do not
|
||||
* recommend using this option anymore.
|
||||
* A future version of Apollo Client will contain a similar feature without
|
||||
* the risk of memory leaks.
|
||||
*/
|
||||
canonizeResults?: boolean;
|
||||
}
|
||||
interface WriteOptions<TResult = any, TVariables = any> extends Omit<DataProxy.Query<TVariables, TResult>, "id">, Omit<DataProxy.WriteOptions<TResult>, "data"> {
|
||||
dataId?: string;
|
||||
result: TResult;
|
||||
}
|
||||
interface DiffOptions<TData = any, TVariables = any> extends Omit<ReadOptions<TVariables, TData>, "rootId"> {
|
||||
}
|
||||
interface WatchOptions<TData = any, TVariables = any> extends DiffOptions<TData, TVariables> {
|
||||
watcher?: object;
|
||||
immediate?: boolean;
|
||||
callback: WatchCallback<TData>;
|
||||
lastDiff?: DiffResult<TData>;
|
||||
}
|
||||
interface EvictOptions {
|
||||
id?: string;
|
||||
fieldName?: string;
|
||||
args?: Record<string, any>;
|
||||
broadcast?: boolean;
|
||||
}
|
||||
interface ResetOptions {
|
||||
discardWatches?: boolean;
|
||||
}
|
||||
interface ModifyOptions<Entity extends Record<string, any> = Record<string, any>> {
|
||||
id?: string;
|
||||
fields: Modifiers<Entity> | AllFieldsModifier<Entity>;
|
||||
optimistic?: boolean;
|
||||
broadcast?: boolean;
|
||||
}
|
||||
interface BatchOptions<TCache extends ApolloCache<any>, TUpdateResult = void> {
|
||||
update(cache: TCache): TUpdateResult;
|
||||
optimistic?: string | boolean;
|
||||
removeOptimistic?: string;
|
||||
onWatchUpdated?: (this: TCache, watch: Cache.WatchOptions, diff: Cache.DiffResult<any>, lastDiff?: Cache.DiffResult<any> | undefined) => any;
|
||||
}
|
||||
export import DiffResult = DataProxy.DiffResult;
|
||||
export import ReadQueryOptions = DataProxy.ReadQueryOptions;
|
||||
export import ReadFragmentOptions = DataProxy.ReadFragmentOptions;
|
||||
export import WriteQueryOptions = DataProxy.WriteQueryOptions;
|
||||
export import WriteFragmentOptions = DataProxy.WriteFragmentOptions;
|
||||
export import UpdateQueryOptions = DataProxy.UpdateQueryOptions;
|
||||
export import UpdateFragmentOptions = DataProxy.UpdateFragmentOptions;
|
||||
export import Fragment = DataProxy.Fragment;
|
||||
}
|
||||
//# sourceMappingURL=Cache.d.ts.map
|
||||
4
graphql-subscription/node_modules/@apollo/client/cache/core/types/Cache.js
generated
vendored
Normal file
4
graphql-subscription/node_modules/@apollo/client/cache/core/types/Cache.js
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
export var Cache;
|
||||
(function (Cache) {
|
||||
})(Cache || (Cache = {}));
|
||||
//# sourceMappingURL=Cache.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/core/types/Cache.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/core/types/Cache.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"Cache.js","sourceRoot":"","sources":["../../../../src/cache/core/types/Cache.ts"],"names":[],"mappings":"AAIA,MAAM,KAAW,KAAK,CA8GrB;AA9GD,WAAiB,KAAK;AA8GtB,CAAC,EA9GgB,KAAK,KAAL,KAAK,QA8GrB","sourcesContent":["import { DataProxy } from \"./DataProxy.js\";\nimport type { AllFieldsModifier, Modifiers } from \"./common.js\";\nimport type { ApolloCache } from \"../cache.js\";\n\nexport namespace Cache {\n export type WatchCallback<TData = any> = (\n diff: Cache.DiffResult<TData>,\n lastDiff?: Cache.DiffResult<TData>\n ) => void;\n\n export interface ReadOptions<TVariables = any, TData = any>\n extends DataProxy.Query<TVariables, TData> {\n rootId?: string;\n previousResult?: any;\n optimistic: boolean;\n returnPartialData?: boolean;\n /**\n * @deprecated\n * Using `canonizeResults` can result in memory leaks so we generally do not\n * recommend using this option anymore.\n * A future version of Apollo Client will contain a similar feature without\n * the risk of memory leaks.\n */\n canonizeResults?: boolean;\n }\n\n export interface WriteOptions<TResult = any, TVariables = any>\n extends Omit<DataProxy.Query<TVariables, TResult>, \"id\">,\n Omit<DataProxy.WriteOptions<TResult>, \"data\"> {\n dataId?: string;\n result: TResult;\n }\n\n export interface DiffOptions<TData = any, TVariables = any>\n extends Omit<ReadOptions<TVariables, TData>, \"rootId\"> {\n // The DiffOptions interface is currently just an alias for\n // ReadOptions, though DiffOptions used to be responsible for\n // declaring the returnPartialData option.\n }\n\n export interface WatchOptions<TData = any, TVariables = any>\n extends DiffOptions<TData, TVariables> {\n watcher?: object;\n immediate?: boolean;\n callback: WatchCallback<TData>;\n lastDiff?: DiffResult<TData>;\n }\n\n export interface EvictOptions {\n id?: string;\n fieldName?: string;\n args?: Record<string, any>;\n broadcast?: boolean;\n }\n\n // Although you can call cache.reset() without options, its behavior can 
be\n // configured by passing a Cache.ResetOptions object.\n export interface ResetOptions {\n discardWatches?: boolean;\n }\n\n export interface ModifyOptions<\n Entity extends Record<string, any> = Record<string, any>,\n > {\n id?: string;\n fields: Modifiers<Entity> | AllFieldsModifier<Entity>;\n optimistic?: boolean;\n broadcast?: boolean;\n }\n\n export interface BatchOptions<\n TCache extends ApolloCache<any>,\n TUpdateResult = void,\n > {\n // Same as the first parameter of performTransaction, except the cache\n // argument will have the subclass type rather than ApolloCache.\n update(cache: TCache): TUpdateResult;\n\n // Passing a string for this option creates a new optimistic layer, with the\n // given string as its layer.id, just like passing a string for the\n // optimisticId parameter of performTransaction. Passing true is the same as\n // passing undefined to performTransaction (running the batch operation\n // against the current top layer of the cache), and passing false is the\n // same as passing null (running the operation against root/non-optimistic\n // cache data).\n optimistic?: string | boolean;\n\n // If you specify the ID of an optimistic layer using this option, that\n // layer will be removed as part of the batch transaction, triggering at\n // most one broadcast for both the transaction and the removal of the layer.\n // Note: this option is needed because calling cache.removeOptimistic during\n // the transaction function may not be not safe, since any modifications to\n // cache layers may be discarded after the transaction finishes.\n removeOptimistic?: string;\n\n // If you want to find out which watched queries were invalidated during\n // this batch operation, pass this optional callback function. 
Returning\n // false from the callback will prevent broadcasting this result.\n onWatchUpdated?: (\n this: TCache,\n watch: Cache.WatchOptions,\n diff: Cache.DiffResult<any>,\n lastDiff?: Cache.DiffResult<any> | undefined\n ) => any;\n }\n\n export import DiffResult = DataProxy.DiffResult;\n export import ReadQueryOptions = DataProxy.ReadQueryOptions;\n export import ReadFragmentOptions = DataProxy.ReadFragmentOptions;\n export import WriteQueryOptions = DataProxy.WriteQueryOptions;\n export import WriteFragmentOptions = DataProxy.WriteFragmentOptions;\n export import UpdateQueryOptions = DataProxy.UpdateQueryOptions;\n export import UpdateFragmentOptions = DataProxy.UpdateFragmentOptions;\n export import Fragment = DataProxy.Fragment;\n}\n"]}
|
||||
149
graphql-subscription/node_modules/@apollo/client/cache/core/types/DataProxy.d.ts
generated
vendored
Normal file
149
graphql-subscription/node_modules/@apollo/client/cache/core/types/DataProxy.d.ts
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
import type { DocumentNode } from "graphql";
|
||||
import type { TypedDocumentNode } from "@graphql-typed-document-node/core";
|
||||
import type { MissingFieldError } from "./common.js";
|
||||
import type { Reference } from "../../../utilities/index.js";
|
||||
export declare namespace DataProxy {
|
||||
interface Query<TVariables, TData> {
|
||||
/**
|
||||
* The GraphQL query shape to be used constructed using the `gql` template
|
||||
* string tag from `graphql-tag`. The query will be used to determine the
|
||||
* shape of the data to be read.
|
||||
*/
|
||||
query: DocumentNode | TypedDocumentNode<TData, TVariables>;
|
||||
/**
|
||||
* Any variables that the GraphQL query may depend on.
|
||||
*/
|
||||
variables?: TVariables;
|
||||
/**
|
||||
* The root id to be used. Defaults to "ROOT_QUERY", which is the ID of the
|
||||
* root query object. This property makes writeQuery capable of writing data
|
||||
* to any object in the cache.
|
||||
*/
|
||||
id?: string;
|
||||
}
|
||||
interface Fragment<TVariables, TData> {
|
||||
/**
|
||||
* The root id to be used. This id should take the same form as the
|
||||
* value returned by your `dataIdFromObject` function. If a value with your
|
||||
* id does not exist in the store, `null` will be returned.
|
||||
*/
|
||||
id?: string;
|
||||
/**
|
||||
* A GraphQL document created using the `gql` template string tag from
|
||||
* `graphql-tag` with one or more fragments which will be used to determine
|
||||
* the shape of data to read. If you provide more than one fragment in this
|
||||
* document then you must also specify `fragmentName` to select a single.
|
||||
*/
|
||||
fragment: DocumentNode | TypedDocumentNode<TData, TVariables>;
|
||||
/**
|
||||
* The name of the fragment in your GraphQL document to be used. If you do
|
||||
* not provide a `fragmentName` and there is only one fragment in your
|
||||
* `fragment` document then that fragment will be used.
|
||||
*/
|
||||
fragmentName?: string;
|
||||
/**
|
||||
* Any variables that your GraphQL fragments depend on.
|
||||
*/
|
||||
variables?: TVariables;
|
||||
}
|
||||
interface ReadQueryOptions<TData, TVariables> extends Query<TVariables, TData> {
|
||||
/**
|
||||
* Whether to return incomplete data rather than null.
|
||||
* Defaults to false.
|
||||
*/
|
||||
returnPartialData?: boolean;
|
||||
/**
|
||||
* Whether to read from optimistic or non-optimistic cache data. If
|
||||
* this named option is provided, the optimistic parameter of the
|
||||
* readQuery method can be omitted. Defaults to false.
|
||||
*/
|
||||
optimistic?: boolean;
|
||||
/**
|
||||
* Whether to canonize cache results before returning them. Canonization takes some extra time, but it speeds up future deep equality comparisons. Defaults to false.
|
||||
*
|
||||
* @deprecated
|
||||
*
|
||||
* Using `canonizeResults` can result in memory leaks so we generally do not recommend using this option anymore. A future version of Apollo Client will contain a similar feature without the risk of memory leaks.
|
||||
*/
|
||||
canonizeResults?: boolean;
|
||||
}
|
||||
interface ReadFragmentOptions<TData, TVariables> extends Fragment<TVariables, TData> {
|
||||
/**
|
||||
* Whether to return incomplete data rather than null.
|
||||
* Defaults to false.
|
||||
*/
|
||||
returnPartialData?: boolean;
|
||||
/**
|
||||
* Whether to read from optimistic or non-optimistic cache data. If
|
||||
* this named option is provided, the optimistic parameter of the
|
||||
* readQuery method can be omitted. Defaults to false.
|
||||
*/
|
||||
optimistic?: boolean;
|
||||
/**
|
||||
* Whether to canonize cache results before returning them. Canonization takes some extra time, but it speeds up future deep equality comparisons. Defaults to false.
|
||||
*
|
||||
* @deprecated
|
||||
*
|
||||
* Using `canonizeResults` can result in memory leaks so we generally do not recommend using this option anymore. A future version of Apollo Client will contain a similar feature without the risk of memory leaks.
|
||||
*/
|
||||
canonizeResults?: boolean;
|
||||
}
|
||||
interface WriteOptions<TData> {
|
||||
/**
|
||||
* The data you will be writing to the store.
|
||||
*/
|
||||
data: TData;
|
||||
/**
|
||||
* Whether to notify query watchers (default: true).
|
||||
*/
|
||||
broadcast?: boolean;
|
||||
/**
|
||||
* When true, ignore existing field data rather than merging it with
|
||||
* incoming data (default: false).
|
||||
*/
|
||||
overwrite?: boolean;
|
||||
}
|
||||
interface WriteQueryOptions<TData, TVariables> extends Query<TVariables, TData>, WriteOptions<TData> {
|
||||
}
|
||||
interface WriteFragmentOptions<TData, TVariables> extends Fragment<TVariables, TData>, WriteOptions<TData> {
|
||||
}
|
||||
interface UpdateQueryOptions<TData, TVariables> extends Omit<ReadQueryOptions<TData, TVariables> & WriteQueryOptions<TData, TVariables>, "data"> {
|
||||
}
|
||||
interface UpdateFragmentOptions<TData, TVariables> extends Omit<ReadFragmentOptions<TData, TVariables> & WriteFragmentOptions<TData, TVariables>, "data"> {
|
||||
}
|
||||
type DiffResult<T> = {
|
||||
result?: T;
|
||||
complete?: boolean;
|
||||
missing?: MissingFieldError[];
|
||||
fromOptimisticTransaction?: boolean;
|
||||
};
|
||||
}
|
||||
/**
|
||||
* A proxy to the normalized data living in our store. This interface allows a
|
||||
* user to read and write denormalized data which feels natural to the user
|
||||
* whilst in the background this data is being converted into the normalized
|
||||
* store format.
|
||||
*/
|
||||
export interface DataProxy {
|
||||
/**
|
||||
* Reads a GraphQL query from the root query id.
|
||||
*/
|
||||
readQuery<QueryType, TVariables = any>(options: DataProxy.ReadQueryOptions<QueryType, TVariables>, optimistic?: boolean): QueryType | null;
|
||||
/**
|
||||
* Reads a GraphQL fragment from any arbitrary id. If there is more than
|
||||
* one fragment in the provided document then a `fragmentName` must be
|
||||
* provided to select the correct fragment.
|
||||
*/
|
||||
readFragment<FragmentType, TVariables = any>(options: DataProxy.ReadFragmentOptions<FragmentType, TVariables>, optimistic?: boolean): FragmentType | null;
|
||||
/**
|
||||
* Writes a GraphQL query to the root query id.
|
||||
*/
|
||||
writeQuery<TData = any, TVariables = any>(options: DataProxy.WriteQueryOptions<TData, TVariables>): Reference | undefined;
|
||||
/**
|
||||
* Writes a GraphQL fragment to any arbitrary id. If there is more than
|
||||
* one fragment in the provided document then a `fragmentName` must be
|
||||
* provided to select the correct fragment.
|
||||
*/
|
||||
writeFragment<TData = any, TVariables = any>(options: DataProxy.WriteFragmentOptions<TData, TVariables>): Reference | undefined;
|
||||
}
|
||||
//# sourceMappingURL=DataProxy.d.ts.map
|
||||
2
graphql-subscription/node_modules/@apollo/client/cache/core/types/DataProxy.js
generated
vendored
Normal file
2
graphql-subscription/node_modules/@apollo/client/cache/core/types/DataProxy.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
export {};
|
||||
//# sourceMappingURL=DataProxy.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/core/types/DataProxy.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/core/types/DataProxy.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
62
graphql-subscription/node_modules/@apollo/client/cache/core/types/common.d.ts
generated
vendored
Normal file
62
graphql-subscription/node_modules/@apollo/client/cache/core/types/common.d.ts
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
import type { DocumentNode, FieldNode } from "graphql";
|
||||
import type { Reference, StoreObject, StoreValue, isReference, AsStoreObject } from "../../../utilities/index.js";
|
||||
import type { StorageType } from "../../inmemory/policies.js";
|
||||
export type SafeReadonly<T> = T extends object ? Readonly<T> : T;
|
||||
export type MissingTree = string | {
|
||||
readonly [key: string]: MissingTree;
|
||||
};
|
||||
export declare class MissingFieldError extends Error {
|
||||
readonly message: string;
|
||||
readonly path: MissingTree | Array<string | number>;
|
||||
readonly query: DocumentNode;
|
||||
readonly variables?: Record<string, any> | undefined;
|
||||
constructor(message: string, path: MissingTree | Array<string | number>, query: DocumentNode, variables?: Record<string, any> | undefined);
|
||||
readonly missing: MissingTree;
|
||||
}
|
||||
export interface FieldSpecifier {
|
||||
typename?: string;
|
||||
fieldName: string;
|
||||
field?: FieldNode;
|
||||
args?: Record<string, any>;
|
||||
variables?: Record<string, any>;
|
||||
}
|
||||
export interface ReadFieldOptions extends FieldSpecifier {
|
||||
from?: StoreObject | Reference;
|
||||
}
|
||||
export interface ReadFieldFunction {
|
||||
<V = StoreValue>(options: ReadFieldOptions): SafeReadonly<V> | undefined;
|
||||
<V = StoreValue>(fieldName: string, from?: StoreObject | Reference): SafeReadonly<V> | undefined;
|
||||
}
|
||||
export type ToReferenceFunction = (objOrIdOrRef: StoreObject | string | Reference, mergeIntoStore?: boolean) => Reference | undefined;
|
||||
export type CanReadFunction = (value: StoreValue) => boolean;
|
||||
declare const _deleteModifier: unique symbol;
|
||||
export interface DeleteModifier {
|
||||
[_deleteModifier]: true;
|
||||
}
|
||||
declare const _invalidateModifier: unique symbol;
|
||||
export interface InvalidateModifier {
|
||||
[_invalidateModifier]: true;
|
||||
}
|
||||
declare const _ignoreModifier: unique symbol;
|
||||
export interface IgnoreModifier {
|
||||
[_ignoreModifier]: true;
|
||||
}
|
||||
export type ModifierDetails = {
|
||||
DELETE: DeleteModifier;
|
||||
INVALIDATE: InvalidateModifier;
|
||||
fieldName: string;
|
||||
storeFieldName: string;
|
||||
readField: ReadFieldFunction;
|
||||
canRead: CanReadFunction;
|
||||
isReference: typeof isReference;
|
||||
toReference: ToReferenceFunction;
|
||||
storage: StorageType;
|
||||
};
|
||||
export type Modifier<T> = (value: T, details: ModifierDetails) => T | DeleteModifier | InvalidateModifier;
|
||||
type StoreObjectValueMaybeReference<StoreVal> = StoreVal extends Array<Record<string, any>> ? StoreVal extends Array<infer Item> ? Item extends Record<string, any> ? ReadonlyArray<AsStoreObject<Item> | Reference> : never : never : StoreVal extends Record<string, any> ? AsStoreObject<StoreVal> | Reference : StoreVal;
|
||||
export type AllFieldsModifier<Entity extends Record<string, any>> = Modifier<Entity[keyof Entity] extends infer Value ? StoreObjectValueMaybeReference<Exclude<Value, undefined>> : never>;
|
||||
export type Modifiers<T extends Record<string, any> = Record<string, unknown>> = Partial<{
|
||||
[FieldName in keyof T]: Modifier<StoreObjectValueMaybeReference<Exclude<T[FieldName], undefined>>>;
|
||||
}>;
|
||||
export {};
|
||||
//# sourceMappingURL=common.d.ts.map
|
||||
29
graphql-subscription/node_modules/@apollo/client/cache/core/types/common.js
generated
vendored
Normal file
29
graphql-subscription/node_modules/@apollo/client/cache/core/types/common.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
import { __extends } from "tslib";
|
||||
var MissingFieldError = /** @class */ (function (_super) {
|
||||
__extends(MissingFieldError, _super);
|
||||
function MissingFieldError(message, path, query, variables) {
|
||||
var _a;
|
||||
// 'Error' breaks prototype chain here
|
||||
var _this = _super.call(this, message) || this;
|
||||
_this.message = message;
|
||||
_this.path = path;
|
||||
_this.query = query;
|
||||
_this.variables = variables;
|
||||
if (Array.isArray(_this.path)) {
|
||||
_this.missing = _this.message;
|
||||
for (var i = _this.path.length - 1; i >= 0; --i) {
|
||||
_this.missing = (_a = {}, _a[_this.path[i]] = _this.missing, _a);
|
||||
}
|
||||
}
|
||||
else {
|
||||
_this.missing = _this.path;
|
||||
}
|
||||
// We're not using `Object.setPrototypeOf` here as it isn't fully supported
|
||||
// on Android (see issue #3236).
|
||||
_this.__proto__ = MissingFieldError.prototype;
|
||||
return _this;
|
||||
}
|
||||
return MissingFieldError;
|
||||
}(Error));
|
||||
export { MissingFieldError };
|
||||
//# sourceMappingURL=common.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/core/types/common.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/core/types/common.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"common.js","sourceRoot":"","sources":["../../../../src/cache/core/types/common.ts"],"names":[],"mappings":";AA2BA;IAAuC,qCAAK;IAC1C,2BACkB,OAAe,EACf,IAA0C,EAC1C,KAAmB,EACnB,SAA+B;;QAE/C,sCAAsC;QACtC,YAAA,MAAK,YAAC,OAAO,CAAC,SAAC;QANC,aAAO,GAAP,OAAO,CAAQ;QACf,UAAI,GAAJ,IAAI,CAAsC;QAC1C,WAAK,GAAL,KAAK,CAAc;QACnB,eAAS,GAAT,SAAS,CAAsB;QAK/C,IAAI,KAAK,CAAC,OAAO,CAAC,KAAI,CAAC,IAAI,CAAC,EAAE,CAAC;YAC7B,KAAI,CAAC,OAAO,GAAG,KAAI,CAAC,OAAO,CAAC;YAC5B,KAAK,IAAI,CAAC,GAAG,KAAI,CAAC,IAAI,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC;gBAC/C,KAAI,CAAC,OAAO,aAAK,GAAC,KAAI,CAAC,IAAI,CAAC,CAAC,CAAC,IAAG,KAAI,CAAC,OAAO,KAAE,CAAC;YAClD,CAAC;QACH,CAAC;aAAM,CAAC;YACN,KAAI,CAAC,OAAO,GAAG,KAAI,CAAC,IAAI,CAAC;QAC3B,CAAC;QAED,2EAA2E;QAC3E,gCAAgC;QAC/B,KAAY,CAAC,SAAS,GAAG,iBAAiB,CAAC,SAAS,CAAC;;IACxD,CAAC;IAGH,wBAAC;AAAD,CAAC,AAzBD,CAAuC,KAAK,GAyB3C","sourcesContent":["import type { DocumentNode, FieldNode } from \"graphql\";\n\nimport type {\n Reference,\n StoreObject,\n StoreValue,\n isReference,\n AsStoreObject,\n} from \"../../../utilities/index.js\";\n\nimport type { StorageType } from \"../../inmemory/policies.js\";\n\n// The Readonly<T> type only really works for object types, since it marks\n// all of the object's properties as readonly, but there are many cases when\n// a generic type parameter like TExisting might be a string or some other\n// primitive type, in which case we need to avoid wrapping it with Readonly.\n// SafeReadonly<string> collapses to just string, which makes string\n// assignable to SafeReadonly<any>, whereas string is not assignable to\n// Readonly<any>, somewhat surprisingly.\nexport type SafeReadonly<T> = T extends object ? 
Readonly<T> : T;\n\nexport type MissingTree =\n | string\n | {\n readonly [key: string]: MissingTree;\n };\n\nexport class MissingFieldError extends Error {\n constructor(\n public readonly message: string,\n public readonly path: MissingTree | Array<string | number>,\n public readonly query: DocumentNode,\n public readonly variables?: Record<string, any>\n ) {\n // 'Error' breaks prototype chain here\n super(message);\n\n if (Array.isArray(this.path)) {\n this.missing = this.message;\n for (let i = this.path.length - 1; i >= 0; --i) {\n this.missing = { [this.path[i]]: this.missing };\n }\n } else {\n this.missing = this.path;\n }\n\n // We're not using `Object.setPrototypeOf` here as it isn't fully supported\n // on Android (see issue #3236).\n (this as any).__proto__ = MissingFieldError.prototype;\n }\n\n public readonly missing: MissingTree;\n}\n\nexport interface FieldSpecifier {\n typename?: string;\n fieldName: string;\n field?: FieldNode;\n args?: Record<string, any>;\n variables?: Record<string, any>;\n}\n\nexport interface ReadFieldOptions extends FieldSpecifier {\n from?: StoreObject | Reference;\n}\n\nexport interface ReadFieldFunction {\n <V = StoreValue>(options: ReadFieldOptions): SafeReadonly<V> | undefined;\n <V = StoreValue>(\n fieldName: string,\n from?: StoreObject | Reference\n ): SafeReadonly<V> | undefined;\n}\n\nexport type ToReferenceFunction = (\n objOrIdOrRef: StoreObject | string | Reference,\n mergeIntoStore?: boolean\n) => Reference | undefined;\n\nexport type CanReadFunction = (value: StoreValue) => boolean;\n\ndeclare const _deleteModifier: unique symbol;\nexport interface DeleteModifier {\n [_deleteModifier]: true;\n}\ndeclare const _invalidateModifier: unique symbol;\nexport interface InvalidateModifier {\n [_invalidateModifier]: true;\n}\ndeclare const _ignoreModifier: unique symbol;\nexport interface IgnoreModifier {\n [_ignoreModifier]: true;\n}\n\nexport type ModifierDetails = {\n DELETE: DeleteModifier;\n INVALIDATE: 
InvalidateModifier;\n fieldName: string;\n storeFieldName: string;\n readField: ReadFieldFunction;\n canRead: CanReadFunction;\n isReference: typeof isReference;\n toReference: ToReferenceFunction;\n storage: StorageType;\n};\n\nexport type Modifier<T> = (\n value: T,\n details: ModifierDetails\n) => T | DeleteModifier | InvalidateModifier;\n\ntype StoreObjectValueMaybeReference<StoreVal> =\n StoreVal extends Array<Record<string, any>> ?\n StoreVal extends Array<infer Item> ?\n Item extends Record<string, any> ?\n ReadonlyArray<AsStoreObject<Item> | Reference>\n : never\n : never\n : StoreVal extends Record<string, any> ? AsStoreObject<StoreVal> | Reference\n : StoreVal;\n\nexport type AllFieldsModifier<Entity extends Record<string, any>> = Modifier<\n Entity[keyof Entity] extends infer Value ?\n StoreObjectValueMaybeReference<Exclude<Value, undefined>>\n : never\n>;\n\nexport type Modifiers<T extends Record<string, any> = Record<string, unknown>> =\n Partial<{\n [FieldName in keyof T]: Modifier<\n StoreObjectValueMaybeReference<Exclude<T[FieldName], undefined>>\n >;\n }>;\n"]}
|
||||
20
graphql-subscription/node_modules/@apollo/client/cache/index.d.ts
generated
vendored
Normal file
20
graphql-subscription/node_modules/@apollo/client/cache/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
import "../utilities/globals/index.js";
|
||||
export type { Transaction } from "./core/cache.js";
|
||||
export { ApolloCache } from "./core/cache.js";
|
||||
export { Cache } from "./core/types/Cache.js";
|
||||
export type { DataProxy } from "./core/types/DataProxy.js";
|
||||
export type { MissingTree, Modifier, Modifiers, ModifierDetails, ReadFieldOptions, } from "./core/types/common.js";
|
||||
export { MissingFieldError } from "./core/types/common.js";
|
||||
export type { Reference } from "../utilities/index.js";
|
||||
export { isReference, makeReference, canonicalStringify, } from "../utilities/index.js";
|
||||
export { EntityStore } from "./inmemory/entityStore.js";
|
||||
export { fieldNameFromStoreName, defaultDataIdFromObject, } from "./inmemory/helpers.js";
|
||||
export { InMemoryCache } from "./inmemory/inMemoryCache.js";
|
||||
export type { ReactiveVar } from "./inmemory/reactiveVars.js";
|
||||
export { makeVar, cacheSlot } from "./inmemory/reactiveVars.js";
|
||||
export type { TypePolicies, TypePolicy, FieldPolicy, FieldReadFunction, FieldMergeFunction, FieldFunctionOptions, PossibleTypesMap, } from "./inmemory/policies.js";
|
||||
export { Policies } from "./inmemory/policies.js";
|
||||
export type { FragmentRegistryAPI } from "./inmemory/fragmentRegistry.js";
|
||||
export { createFragmentRegistry } from "./inmemory/fragmentRegistry.js";
|
||||
export * from "./inmemory/types.js";
|
||||
//# sourceMappingURL=index.d.ts.map
|
||||
13
graphql-subscription/node_modules/@apollo/client/cache/index.js
generated
vendored
Normal file
13
graphql-subscription/node_modules/@apollo/client/cache/index.js
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
import "../utilities/globals/index.js";
|
||||
export { ApolloCache } from "./core/cache.js";
|
||||
export { Cache } from "./core/types/Cache.js";
|
||||
export { MissingFieldError } from "./core/types/common.js";
|
||||
export { isReference, makeReference, canonicalStringify, } from "../utilities/index.js";
|
||||
export { EntityStore } from "./inmemory/entityStore.js";
|
||||
export { fieldNameFromStoreName, defaultDataIdFromObject, } from "./inmemory/helpers.js";
|
||||
export { InMemoryCache } from "./inmemory/inMemoryCache.js";
|
||||
export { makeVar, cacheSlot } from "./inmemory/reactiveVars.js";
|
||||
export { Policies } from "./inmemory/policies.js";
|
||||
export { createFragmentRegistry } from "./inmemory/fragmentRegistry.js";
|
||||
export * from "./inmemory/types.js";
|
||||
//# sourceMappingURL=index.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/index.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/index.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/cache/index.ts"],"names":[],"mappings":"AAAA,OAAO,+BAA+B,CAAC;AAGvC,OAAO,EAAE,WAAW,EAAE,MAAM,iBAAiB,CAAC;AAC9C,OAAO,EAAE,KAAK,EAAE,MAAM,uBAAuB,CAAC;AAS9C,OAAO,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAG3D,OAAO,EACL,WAAW,EACX,aAAa,EACb,kBAAkB,GACnB,MAAM,uBAAuB,CAAC;AAE/B,OAAO,EAAE,WAAW,EAAE,MAAM,2BAA2B,CAAC;AACxD,OAAO,EACL,sBAAsB,EACtB,uBAAuB,GACxB,MAAM,uBAAuB,CAAC;AAE/B,OAAO,EAAE,aAAa,EAAE,MAAM,6BAA6B,CAAC;AAG5D,OAAO,EAAE,OAAO,EAAE,SAAS,EAAE,MAAM,4BAA4B,CAAC;AAWhE,OAAO,EAAE,QAAQ,EAAE,MAAM,wBAAwB,CAAC;AAGlD,OAAO,EAAE,sBAAsB,EAAE,MAAM,gCAAgC,CAAC;AAExE,cAAc,qBAAqB,CAAC","sourcesContent":["import \"../utilities/globals/index.js\";\n\nexport type { Transaction } from \"./core/cache.js\";\nexport { ApolloCache } from \"./core/cache.js\";\nexport { Cache } from \"./core/types/Cache.js\";\nexport type { DataProxy } from \"./core/types/DataProxy.js\";\nexport type {\n MissingTree,\n Modifier,\n Modifiers,\n ModifierDetails,\n ReadFieldOptions,\n} from \"./core/types/common.js\";\nexport { MissingFieldError } from \"./core/types/common.js\";\n\nexport type { Reference } from \"../utilities/index.js\";\nexport {\n isReference,\n makeReference,\n canonicalStringify,\n} from \"../utilities/index.js\";\n\nexport { EntityStore } from \"./inmemory/entityStore.js\";\nexport {\n fieldNameFromStoreName,\n defaultDataIdFromObject,\n} from \"./inmemory/helpers.js\";\n\nexport { InMemoryCache } from \"./inmemory/inMemoryCache.js\";\n\nexport type { ReactiveVar } from \"./inmemory/reactiveVars.js\";\nexport { makeVar, cacheSlot } from \"./inmemory/reactiveVars.js\";\n\nexport type {\n TypePolicies,\n TypePolicy,\n FieldPolicy,\n FieldReadFunction,\n FieldMergeFunction,\n FieldFunctionOptions,\n PossibleTypesMap,\n} from \"./inmemory/policies.js\";\nexport { Policies } from \"./inmemory/policies.js\";\n\nexport type { FragmentRegistryAPI } from \"./inmemory/fragmentRegistry.js\";\nexport { createFragmentRegistry } from 
\"./inmemory/fragmentRegistry.js\";\n\nexport * from \"./inmemory/types.js\";\n"]}
|
||||
92
graphql-subscription/node_modules/@apollo/client/cache/inmemory/entityStore.d.ts
generated
vendored
Normal file
92
graphql-subscription/node_modules/@apollo/client/cache/inmemory/entityStore.d.ts
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
import { Trie } from "@wry/trie";
|
||||
import type { StoreValue, StoreObject, Reference } from "../../utilities/index.js";
|
||||
import type { NormalizedCache, NormalizedCacheObject } from "./types.js";
|
||||
import type { Policies, StorageType } from "./policies.js";
|
||||
import type { Cache } from "../core/types/Cache.js";
|
||||
import type { SafeReadonly, Modifier, Modifiers, ToReferenceFunction, CanReadFunction } from "../core/types/common.js";
|
||||
import type { DocumentNode, FieldNode, SelectionSetNode } from "graphql";
|
||||
export declare abstract class EntityStore implements NormalizedCache {
|
||||
readonly policies: Policies;
|
||||
readonly group: CacheGroup;
|
||||
protected data: NormalizedCacheObject;
|
||||
constructor(policies: Policies, group: CacheGroup);
|
||||
abstract addLayer(layerId: string, replay: (layer: EntityStore) => any): Layer;
|
||||
abstract removeLayer(layerId: string): EntityStore;
|
||||
toObject(): NormalizedCacheObject;
|
||||
has(dataId: string): boolean;
|
||||
get(dataId: string, fieldName: string): StoreValue;
|
||||
protected lookup(dataId: string, dependOnExistence?: boolean): StoreObject | undefined;
|
||||
merge(older: string | StoreObject, newer: StoreObject | string): void;
|
||||
modify(dataId: string, fields: Modifier<any> | Modifiers<Record<string, any>>): boolean;
|
||||
delete(dataId: string, fieldName?: string, args?: Record<string, any>): boolean;
|
||||
evict(options: Cache.EvictOptions, limit: EntityStore): boolean;
|
||||
clear(): void;
|
||||
extract(): NormalizedCacheObject;
|
||||
replace(newData: NormalizedCacheObject | null): void;
|
||||
abstract getStorage(idOrObj: string | StoreObject, ...storeFieldNames: (string | number)[]): StorageType;
|
||||
private rootIds;
|
||||
retain(rootId: string): number;
|
||||
release(rootId: string): number;
|
||||
getRootIdSet(ids?: Set<string>): Set<string>;
|
||||
gc(): string[];
|
||||
private refs;
|
||||
findChildRefIds(dataId: string): Record<string, true>;
|
||||
/** overload for `InMemoryCache.maybeBroadcastWatch` */
|
||||
makeCacheKey(document: DocumentNode, callback: Cache.WatchCallback<any>, details: string): object;
|
||||
/** overload for `StoreReader.executeSelectionSet` */
|
||||
makeCacheKey(selectionSet: SelectionSetNode, parent: string | StoreObject, varString: string | undefined, canonizeResults: boolean): object;
|
||||
/** overload for `StoreReader.executeSubSelectedArray` */
|
||||
makeCacheKey(field: FieldNode, array: readonly any[], varString: string | undefined): object;
|
||||
/** @deprecated This is only meant for internal usage,
|
||||
* in your own code please use a `Trie` instance instead. */
|
||||
makeCacheKey(...args: any[]): object;
|
||||
getFieldValue: <T = StoreValue>(objectOrReference: StoreObject | Reference | undefined, storeFieldName: string) => SafeReadonly<T>;
|
||||
canRead: CanReadFunction;
|
||||
toReference: ToReferenceFunction;
|
||||
}
|
||||
/** Type of the bound `getFieldValue` helper exposed by EntityStore instances. */
export type FieldValueGetter = EntityStore["getFieldValue"];
||||
/**
 * Tracks result-caching dependencies for one or more EntityStore objects
 * (the Root by itself, or all optimistic Layers together). When `caching`
 * is false, `depend`/`dirty` become no-ops.
 */
declare class CacheGroup {
    readonly caching: boolean;
    private parent;
    // Dependency function from the "optimism" library, or null when caching is off.
    private d;
    // Trie used by EntityStore.makeCacheKey to produce stable cache keys.
    keyMaker: Trie<object>;
    constructor(caching: boolean, parent?: CacheGroup | null);
    /** Recreates the dependency tracker and key trie, discarding prior dependencies. */
    resetCaching(): void;
    /** Records that the current computation depends on dataId's storeFieldName. */
    depend(dataId: string, storeFieldName: string): void;
    /** Invalidates anything that depended on dataId's storeFieldName. */
    dirty(dataId: string, storeFieldName: string): void;
}
||||
/** Registers a dependency on the existence of entityId, but only when the store supports result caching. */
export declare function maybeDependOnExistenceOfEntity(store: NormalizedCache, entityId: string): void;
||||
export declare namespace EntityStore {
    /**
     * The base (non-optimistic) layer of the store. Owns the permanent Stump
     * layer and the storage trie shared by all layers.
     */
    class Root extends EntityStore {
        constructor({ policies, resultCaching, seed, }: {
            policies: Policies;
            resultCaching?: boolean;
            seed?: NormalizedCacheObject;
        });
        // Permanent Layer installed just above the Root; optimistic layers stack on it.
        readonly stump: Stump;
        /** Adds an optimistic Layer (delegates to the Stump — see implementation). */
        addLayer(layerId: string, replay: (layer: EntityStore) => any): Layer;
        /** The Root layer is never removed; returns this. */
        removeLayer(): Root;
        readonly storageTrie: Trie<StorageType>;
        getStorage(): StorageType;
    }
}
||||
/**
 * An optimistic layer stacked above the Root/Stump. Reads fall through to
 * `parent` for data not present in this layer; `replay` re-applies the
 * layer's writes when the layer is recreated.
 */
declare class Layer extends EntityStore {
    readonly id: string;
    readonly parent: EntityStore;
    readonly replay: (layer: EntityStore) => any;
    readonly group: CacheGroup;
    constructor(id: string, parent: EntityStore, replay: (layer: EntityStore) => any, group: CacheGroup);
    addLayer(layerId: string, replay: (layer: EntityStore) => any): Layer;
    /** Removes all layers with the given id, dirtying affected fields; may recreate this layer on a new parent. */
    removeLayer(layerId: string): EntityStore;
    /** Snapshot combining parent data with this layer's overrides. */
    toObject(): NormalizedCacheObject;
    findChildRefIds(dataId: string): Record<string, true>;
    /** Delegates to the Root's storage trie. */
    getStorage(): StorageType;
}
||||
/**
 * Permanent Layer just above the Root, allowing optimistic reads/dependencies
 * even when no optimistic layers are active. Never written to directly:
 * merges are forwarded to the Root.
 */
declare class Stump extends Layer {
    constructor(root: EntityStore.Root);
    /** The Stump is never removed; returns this. */
    removeLayer(): this;
    /** Forwards writes to the Root (the Stump itself holds no data). */
    merge(older: string | StoreObject, newer: string | StoreObject): void;
}
||||
/** True when `store` is an EntityStore whose group has result caching enabled. */
export declare function supportsResultCaching(store: any): store is EntityStore;
||||
export {};
|
||||
//# sourceMappingURL=entityStore.d.ts.map
|
||||
671
graphql-subscription/node_modules/@apollo/client/cache/inmemory/entityStore.js
generated
vendored
Normal file
671
graphql-subscription/node_modules/@apollo/client/cache/inmemory/entityStore.js
generated
vendored
Normal file
@@ -0,0 +1,671 @@
|
||||
import { __assign, __extends, __rest } from "tslib";
|
||||
import { invariant } from "../../utilities/globals/index.js";
|
||||
import { dep } from "optimism";
|
||||
import { equal } from "@wry/equality";
|
||||
import { Trie } from "@wry/trie";
|
||||
import { isReference, makeReference, DeepMerger, maybeDeepFreeze, canUseWeakMap, isNonNullObject, } from "../../utilities/index.js";
|
||||
import { hasOwn, fieldNameFromStoreName } from "./helpers.js";
|
||||
// Sentinel returned from a field modifier to request deletion of that field
// (see EntityStore#modify, which maps it to undefined before merging).
var DELETE = Object.create(null);
// Canonical modifier that always deletes; EntityStore#delete passes this.
var delModifier = function () { return DELETE; };
// Sentinel returned from a field modifier to dirty (invalidate) a field
// without changing its stored value.
var INVALIDATE = Object.create(null);
||||
// Abstract base class for the normalized entity store. Root and Layer (below)
// extend it; the dependency-tracking CacheGroup is injected via the constructor.
var EntityStore = /** @class */ (function () {
    function EntityStore(policies, group) {
        var _this = this;
        this.policies = policies;
        this.group = group;
        // Map from dataId to StoreObject for this layer only.
        this.data = Object.create(null);
        // Maps root entity IDs to the number of times they have been retained, minus
        // the number of times they have been released. Retained entities keep other
        // entities they reference (even indirectly) from being garbage collected.
        this.rootIds = Object.create(null);
        // Lazily tracks { __ref: <dataId> } strings contained by this.data[dataId].
        this.refs = Object.create(null);
        // Bound function that can be passed around to provide easy access to fields
        // of Reference objects as well as ordinary objects.
        this.getFieldValue = function (objectOrReference, storeFieldName) {
            return maybeDeepFreeze(isReference(objectOrReference) ?
                _this.get(objectOrReference.__ref, storeFieldName)
                : objectOrReference && objectOrReference[storeFieldName]);
        };
        // Returns true for non-normalized StoreObjects and non-dangling
        // References, indicating that readField(name, objOrRef) has a chance of
        // working. Useful for filtering out dangling references from lists.
        this.canRead = function (objOrRef) {
            return isReference(objOrRef) ?
                _this.has(objOrRef.__ref)
                : typeof objOrRef === "object";
        };
        // Bound function that converts an id or an object with a __typename and
        // primary key fields to a Reference object. If called with a Reference object,
        // that same Reference object is returned. Pass true for mergeIntoStore to persist
        // an object into the store.
        this.toReference = function (objOrIdOrRef, mergeIntoStore) {
            if (typeof objOrIdOrRef === "string") {
                return makeReference(objOrIdOrRef);
            }
            if (isReference(objOrIdOrRef)) {
                return objOrIdOrRef;
            }
            var id = _this.policies.identify(objOrIdOrRef)[0];
            if (id) {
                var ref = makeReference(id);
                if (mergeIntoStore) {
                    _this.merge(id, objOrIdOrRef);
                }
                return ref;
            }
            // NOTE: returns undefined when the object cannot be identified.
        };
    }
    // Although the EntityStore class is abstract, it contains concrete
    // implementations of the various NormalizedCache interface methods that
    // are inherited by the Root and Layer subclasses.
    // Shallow snapshot of this layer's own data.
    EntityStore.prototype.toObject = function () {
        return __assign({}, this.data);
    };
    // Existence check that also registers an "__exists" dependency (via lookup).
    EntityStore.prototype.has = function (dataId) {
        return this.lookup(dataId, true) !== void 0;
    };
    // Reads one field, registering a dependency and falling through to the
    // parent layer (for Layers) when this layer has no entry.
    EntityStore.prototype.get = function (dataId, fieldName) {
        this.group.depend(dataId, fieldName);
        if (hasOwn.call(this.data, dataId)) {
            var storeObject = this.data[dataId];
            if (storeObject && hasOwn.call(storeObject, fieldName)) {
                return storeObject[fieldName];
            }
        }
        if (fieldName === "__typename" &&
            hasOwn.call(this.policies.rootTypenamesById, dataId)) {
            // Root IDs (e.g. ROOT_QUERY) have well-known __typename strings.
            return this.policies.rootTypenamesById[dataId];
        }
        if (this instanceof Layer) {
            return this.parent.get(dataId, fieldName);
        }
    };
    EntityStore.prototype.lookup = function (dataId, dependOnExistence) {
        // The has method (above) calls lookup with dependOnExistence = true, so
        // that it can later be invalidated when we add or remove a StoreObject for
        // this dataId. Any consumer who cares about the contents of the StoreObject
        // should not rely on this dependency, since the contents could change
        // without the object being added or removed.
        if (dependOnExistence)
            this.group.depend(dataId, "__exists");
        if (hasOwn.call(this.data, dataId)) {
            return this.data[dataId];
        }
        if (this instanceof Layer) {
            return this.parent.lookup(dataId, dependOnExistence);
        }
        if (this.policies.rootTypenamesById[dataId]) {
            // Root IDs always "exist", even before anything is written to them.
            return Object.create(null);
        }
    };
    // Deep-merges `newer` into `older` (either may be a dataId string, a
    // Reference, or a StoreObject) and dirties any fields whose values change.
    EntityStore.prototype.merge = function (older, newer) {
        var _this = this;
        var dataId;
        // Convert unexpected references to ID strings.
        if (isReference(older))
            older = older.__ref;
        if (isReference(newer))
            newer = newer.__ref;
        var existing = typeof older === "string" ? this.lookup((dataId = older)) : older;
        var incoming = typeof newer === "string" ? this.lookup((dataId = newer)) : newer;
        // If newer was a string ID, but that ID was not defined in this store,
        // then there are no fields to be merged, so we're done.
        if (!incoming)
            return;
        invariant(typeof dataId === "string", 1);
        var merged = new DeepMerger(storeObjectReconciler).merge(existing, incoming);
        // Even if merged === existing, existing may have come from a lower
        // layer, so we always need to set this.data[dataId] on this level.
        this.data[dataId] = merged;
        if (merged !== existing) {
            // Cached child references for this entity are now stale.
            delete this.refs[dataId];
            if (this.group.caching) {
                var fieldsToDirty_1 = Object.create(null);
                // If we added a new StoreObject where there was previously none, dirty
                // anything that depended on the existence of this dataId, such as the
                // EntityStore#has method.
                if (!existing)
                    fieldsToDirty_1.__exists = 1;
                // Now invalidate dependents who called getFieldValue for any fields
                // that are changing as a result of this merge.
                Object.keys(incoming).forEach(function (storeFieldName) {
                    if (!existing ||
                        existing[storeFieldName] !== merged[storeFieldName]) {
                        // Always dirty the full storeFieldName, which may include
                        // serialized arguments following the fieldName prefix.
                        fieldsToDirty_1[storeFieldName] = 1;
                        // Also dirty fieldNameFromStoreName(storeFieldName) if it's
                        // different from storeFieldName and this field does not have
                        // keyArgs configured, because that means the cache can't make
                        // any assumptions about how field values with the same field
                        // name but different arguments might be interrelated, so it
                        // must err on the side of invalidating all field values that
                        // share the same short fieldName, regardless of arguments.
                        var fieldName = fieldNameFromStoreName(storeFieldName);
                        if (fieldName !== storeFieldName &&
                            !_this.policies.hasKeyArgs(merged.__typename, fieldName)) {
                            fieldsToDirty_1[fieldName] = 1;
                        }
                        // If merged[storeFieldName] has become undefined, and this is the
                        // Root layer, actually delete the property from the merged object,
                        // which is guaranteed to have been created fresh in this method.
                        if (merged[storeFieldName] === void 0 && !(_this instanceof Layer)) {
                            delete merged[storeFieldName];
                        }
                    }
                });
                if (fieldsToDirty_1.__typename &&
                    !(existing && existing.__typename) &&
                    // Since we return default root __typename strings
                    // automatically from store.get, we don't need to dirty the
                    // ROOT_QUERY.__typename field if merged.__typename is equal
                    // to the default string (usually "Query").
                    this.policies.rootTypenamesById[dataId] === merged.__typename) {
                    delete fieldsToDirty_1.__typename;
                }
                Object.keys(fieldsToDirty_1).forEach(function (fieldName) {
                    return _this.group.dirty(dataId, fieldName);
                });
            }
        }
    };
    // Applies modifier function(s) to the fields of one entity. Returns true if
    // any field changed. Fields may be deleted (DELETE) or invalidated
    // (INVALIDATE) via the sentinels passed in sharedDetails.
    EntityStore.prototype.modify = function (dataId, fields) {
        var _this = this;
        var storeObject = this.lookup(dataId);
        if (storeObject) {
            var changedFields_1 = Object.create(null);
            var needToMerge_1 = false;
            var allDeleted_1 = true;
            var sharedDetails_1 = {
                DELETE: DELETE,
                INVALIDATE: INVALIDATE,
                isReference: isReference,
                toReference: this.toReference,
                canRead: this.canRead,
                readField: function (fieldNameOrOptions, from) {
                    return _this.policies.readField(typeof fieldNameOrOptions === "string" ?
                        {
                            fieldName: fieldNameOrOptions,
                            from: from || makeReference(dataId),
                        }
                        : fieldNameOrOptions, { store: _this });
                },
            };
            Object.keys(storeObject).forEach(function (storeFieldName) {
                var fieldName = fieldNameFromStoreName(storeFieldName);
                var fieldValue = storeObject[storeFieldName];
                if (fieldValue === void 0)
                    return;
                var modify = typeof fields === "function" ? fields : (fields[storeFieldName] || fields[fieldName]);
                if (modify) {
                    var newValue = modify === delModifier ? DELETE : (modify(maybeDeepFreeze(fieldValue), __assign(__assign({}, sharedDetails_1), { fieldName: fieldName, storeFieldName: storeFieldName, storage: _this.getStorage(dataId, storeFieldName) })));
                    if (newValue === INVALIDATE) {
                        _this.group.dirty(dataId, storeFieldName);
                    }
                    else {
                        if (newValue === DELETE)
                            newValue = void 0;
                        if (newValue !== fieldValue) {
                            changedFields_1[storeFieldName] = newValue;
                            needToMerge_1 = true;
                            fieldValue = newValue;
                            // Development-only warnings about writing dangling or
                            // "mixed" Reference values.
                            if (globalThis.__DEV__ !== false) {
                                var checkReference = function (ref) {
                                    if (_this.lookup(ref.__ref) === undefined) {
                                        globalThis.__DEV__ !== false && invariant.warn(2, ref);
                                        return true;
                                    }
                                };
                                if (isReference(newValue)) {
                                    checkReference(newValue);
                                }
                                else if (Array.isArray(newValue)) {
                                    // Warn about writing "mixed" arrays of Reference and non-Reference objects
                                    var seenReference = false;
                                    var someNonReference = void 0;
                                    for (var _i = 0, newValue_1 = newValue; _i < newValue_1.length; _i++) {
                                        var value = newValue_1[_i];
                                        if (isReference(value)) {
                                            seenReference = true;
                                            if (checkReference(value))
                                                break;
                                        }
                                        else {
                                            // Do not warn on primitive values, since those could never be represented
                                            // by a reference. This is a valid (albeit uncommon) use case.
                                            if (typeof value === "object" && !!value) {
                                                var id = _this.policies.identify(value)[0];
                                                // check if object could even be referenced, otherwise we are not interested in it for this warning
                                                if (id) {
                                                    someNonReference = value;
                                                }
                                            }
                                        }
                                        if (seenReference && someNonReference !== undefined) {
                                            globalThis.__DEV__ !== false && invariant.warn(3, someNonReference);
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
                if (fieldValue !== void 0) {
                    allDeleted_1 = false;
                }
            });
            if (needToMerge_1) {
                this.merge(dataId, changedFields_1);
                if (allDeleted_1) {
                    // Layers record the deletion as undefined (shadowing the
                    // parent); the Root can actually drop the entry.
                    if (this instanceof Layer) {
                        this.data[dataId] = void 0;
                    }
                    else {
                        delete this.data[dataId];
                    }
                    this.group.dirty(dataId, "__exists");
                }
                return true;
            }
        }
        return false;
    };
    // If called with only one argument, removes the entire entity
    // identified by dataId. If called with a fieldName as well, removes all
    // fields of that entity whose names match fieldName according to the
    // fieldNameFromStoreName helper function. If called with a fieldName
    // and variables, removes all fields of that entity whose names match fieldName
    // and whose arguments when cached exactly match the variables passed.
    EntityStore.prototype.delete = function (dataId, fieldName, args) {
        var _a;
        var storeObject = this.lookup(dataId);
        if (storeObject) {
            var typename = this.getFieldValue(storeObject, "__typename");
            var storeFieldName = fieldName && args ?
                this.policies.getStoreFieldName({ typename: typename, fieldName: fieldName, args: args })
                : fieldName;
            return this.modify(dataId, storeFieldName ? (_a = {},
                _a[storeFieldName] = delModifier,
                _a) : delModifier);
        }
        return false;
    };
    // Evicts an entity (or one of its fields) from this layer and, unless
    // stopped by `limit`, from the layers beneath it as well.
    EntityStore.prototype.evict = function (options, limit) {
        var evicted = false;
        if (options.id) {
            if (hasOwn.call(this.data, options.id)) {
                evicted = this.delete(options.id, options.fieldName, options.args);
            }
            if (this instanceof Layer && this !== limit) {
                evicted = this.parent.evict(options, limit) || evicted;
            }
            // Always invalidate the field to trigger rereading of watched
            // queries, even if no cache data was modified by the eviction,
            // because queries may depend on computed fields with custom read
            // functions, whose values are not stored in the EntityStore.
            if (options.fieldName || evicted) {
                this.group.dirty(options.id, options.fieldName || "__exists");
            }
        }
        return evicted;
    };
    // Removes all data (equivalent to replace(null)).
    EntityStore.prototype.clear = function () {
        this.replace(null);
    };
    // Serializes the store, recording extra retained root IDs under __META so
    // replace() can restore them.
    EntityStore.prototype.extract = function () {
        var _this = this;
        var obj = this.toObject();
        var extraRootIds = [];
        this.getRootIdSet().forEach(function (id) {
            if (!hasOwn.call(_this.policies.rootTypenamesById, id)) {
                extraRootIds.push(id);
            }
        });
        if (extraRootIds.length) {
            obj.__META = { extraRootIds: extraRootIds.sort() };
        }
        return obj;
    };
    // Replaces the entire contents of the store with newData (or empties it
    // when newData is null), re-retaining any __META.extraRootIds.
    EntityStore.prototype.replace = function (newData) {
        var _this = this;
        Object.keys(this.data).forEach(function (dataId) {
            if (!(newData && hasOwn.call(newData, dataId))) {
                _this.delete(dataId);
            }
        });
        if (newData) {
            var __META = newData.__META, rest_1 = __rest(newData, ["__META"]);
            Object.keys(rest_1).forEach(function (dataId) {
                _this.merge(dataId, rest_1[dataId]);
            });
            if (__META) {
                __META.extraRootIds.forEach(this.retain, this);
            }
        }
    };
    // Increments the retainment count for rootId, protecting it from gc().
    EntityStore.prototype.retain = function (rootId) {
        return (this.rootIds[rootId] = (this.rootIds[rootId] || 0) + 1);
    };
    // Decrements the retainment count for rootId; returns the new count.
    EntityStore.prototype.release = function (rootId) {
        if (this.rootIds[rootId] > 0) {
            var count = --this.rootIds[rootId];
            if (!count)
                delete this.rootIds[rootId];
            return count;
        }
        return 0;
    };
    // Return a Set<string> of all the ID strings that have been retained by
    // this layer/root *and* any layers/roots beneath it.
    EntityStore.prototype.getRootIdSet = function (ids) {
        if (ids === void 0) { ids = new Set(); }
        Object.keys(this.rootIds).forEach(ids.add, ids);
        if (this instanceof Layer) {
            this.parent.getRootIdSet(ids);
        }
        else {
            // Official singleton IDs like ROOT_QUERY and ROOT_MUTATION are
            // always considered roots for garbage collection, regardless of
            // their retainment counts in this.rootIds.
            Object.keys(this.policies.rootTypenamesById).forEach(ids.add, ids);
        }
        return ids;
    };
    // The goal of garbage collection is to remove IDs from the Root layer of the
    // store that are no longer reachable starting from any IDs that have been
    // explicitly retained (see retain and release, above). Returns an array of
    // dataId strings that were removed from the store.
    EntityStore.prototype.gc = function () {
        var _this = this;
        var ids = this.getRootIdSet();
        var snapshot = this.toObject();
        ids.forEach(function (id) {
            if (hasOwn.call(snapshot, id)) {
                // Because we are iterating over an ECMAScript Set, the IDs we add here
                // will be visited in later iterations of the forEach loop only if they
                // were not previously contained by the Set.
                Object.keys(_this.findChildRefIds(id)).forEach(ids.add, ids);
                // By removing IDs from the snapshot object here, we protect them from
                // getting removed from the root store layer below.
                delete snapshot[id];
            }
        });
        var idsToRemove = Object.keys(snapshot);
        if (idsToRemove.length) {
            var root_1 = this;
            while (root_1 instanceof Layer)
                root_1 = root_1.parent;
            idsToRemove.forEach(function (id) { return root_1.delete(id); });
        }
        return idsToRemove;
    };
    // Computes (and caches in this.refs) the set of dataIds referenced by the
    // entity identified by dataId, traversing nested objects and arrays.
    EntityStore.prototype.findChildRefIds = function (dataId) {
        if (!hasOwn.call(this.refs, dataId)) {
            var found_1 = (this.refs[dataId] = Object.create(null));
            var root = this.data[dataId];
            if (!root)
                return found_1;
            var workSet_1 = new Set([root]);
            // Within the store, only arrays and objects can contain child entity
            // references, so we can prune the traversal using this predicate:
            workSet_1.forEach(function (obj) {
                if (isReference(obj)) {
                    found_1[obj.__ref] = true;
                    // In rare cases, a { __ref } Reference object may have other fields.
                    // This often indicates a mismerging of References with StoreObjects,
                    // but garbage collection should not be fooled by a stray __ref
                    // property in a StoreObject (ignoring all the other fields just
                    // because the StoreObject looks like a Reference). To avoid this
                    // premature termination of findChildRefIds recursion, we fall through
                    // to the code below, which will handle any other properties of obj.
                }
                if (isNonNullObject(obj)) {
                    Object.keys(obj).forEach(function (key) {
                        var child = obj[key];
                        // No need to add primitive values to the workSet, since they cannot
                        // contain reference objects.
                        if (isNonNullObject(child)) {
                            workSet_1.add(child);
                        }
                    });
                }
            });
        }
        return this.refs[dataId];
    };
    // Produces a stable object key for the given arguments via the group's trie.
    EntityStore.prototype.makeCacheKey = function () {
        return this.group.keyMaker.lookupArray(arguments);
    };
    return EntityStore;
}());
export { EntityStore };
||||
// A single CacheGroup represents a set of one or more EntityStore objects,
// typically the Root store in a CacheGroup by itself, and all active Layer
// stores in a group together. A single EntityStore object belongs to only
// one CacheGroup, store.group. The CacheGroup is responsible for tracking
// dependencies, so store.group is helpful for generating unique keys for
// cached results that need to be invalidated when/if those dependencies
// change. If we used the EntityStore objects themselves as cache keys (that
// is, store rather than store.group), the cache would become unnecessarily
// fragmented by all the different Layer objects. Instead, the CacheGroup
// approach allows all optimistic Layer objects in the same linked list to
// belong to one CacheGroup, with the non-optimistic Root object belonging
// to another CacheGroup, allowing resultCaching dependencies to be tracked
// separately for optimistic and non-optimistic entity data.
var CacheGroup = /** @class */ (function () {
    function CacheGroup(caching, parent) {
        if (parent === void 0) { parent = null; }
        this.caching = caching;
        this.parent = parent;
        // Dependency tracker from the "optimism" library; null when caching is off.
        this.d = null;
        this.resetCaching();
    }
    // Discards all recorded dependencies and cache keys.
    CacheGroup.prototype.resetCaching = function () {
        this.d = this.caching ? dep() : null;
        this.keyMaker = new Trie(canUseWeakMap);
    };
    // Records a dependency on dataId/storeFieldName (and its short fieldName),
    // propagating to the parent group so Root dependencies also fire.
    CacheGroup.prototype.depend = function (dataId, storeFieldName) {
        if (this.d) {
            this.d(makeDepKey(dataId, storeFieldName));
            var fieldName = fieldNameFromStoreName(storeFieldName);
            if (fieldName !== storeFieldName) {
                // Fields with arguments that contribute extra identifying
                // information to the fieldName (thus forming the storeFieldName)
                // depend not only on the full storeFieldName but also on the
                // short fieldName, so the field can be invalidated using either
                // level of specificity.
                this.d(makeDepKey(dataId, fieldName));
            }
            if (this.parent) {
                this.parent.depend(dataId, storeFieldName);
            }
        }
    };
    // Invalidates dependents of dataId/storeFieldName.
    CacheGroup.prototype.dirty = function (dataId, storeFieldName) {
        if (this.d) {
            this.d.dirty(makeDepKey(dataId, storeFieldName),
            // When storeFieldName === "__exists", that means the entity identified
            // by dataId has either disappeared from the cache or was newly added,
            // so the result caching system would do well to "forget everything it
            // knows" about that object. To achieve that kind of invalidation, we
            // not only dirty the associated result cache entry, but also remove it
            // completely from the dependency graph. For the optimism implementation
            // details, see https://github.com/benjamn/optimism/pull/195.
            storeFieldName === "__exists" ? "forget" : "setDirty");
        }
    };
    return CacheGroup;
}());
||||
// Builds the dependency key "<storeFieldName>#<dataId>". Field names cannot
// contain '#', so this composite key is unambiguous, and it is much cheaper
// to construct than JSON.stringify([dataId, fieldName]).
function makeDepKey(dataId, storeFieldName) {
    return `${storeFieldName}#${dataId}`;
}
||||
// Registers a dependency on the existence (not the contents) of the entity
// identified by entityId — a no-op when the store does not support result caching.
export function maybeDependOnExistenceOfEntity(store, entityId) {
    if (supportsResultCaching(store)) {
        // We use this pseudo-field __exists elsewhere in the EntityStore code to
        // represent changes in the existence of the entity object identified by
        // entityId. This dependency gets reliably dirtied whenever an object with
        // this ID is deleted (or newly created) within this group, so any result
        // cache entries (for example, StoreReader#executeSelectionSet results) that
        // depend on __exists for this entityId will get dirtied as well, leading to
        // the eventual recomputation (instead of reuse) of those result objects the
        // next time someone reads them from the cache.
        store.group.depend(entityId, "__exists");
    }
}
||||
// Installs the Root subclass on the EntityStore "namespace" object.
(function (EntityStore) {
    // Refer to this class as EntityStore.Root outside this namespace.
    var Root = /** @class */ (function (_super) {
        __extends(Root, _super);
        function Root(_a) {
            // resultCaching defaults to true; seed (if given) pre-populates the store.
            var policies = _a.policies, _b = _a.resultCaching, resultCaching = _b === void 0 ? true : _b, seed = _a.seed;
            var _this = _super.call(this, policies, new CacheGroup(resultCaching)) || this;
            _this.stump = new Stump(_this);
            _this.storageTrie = new Trie(canUseWeakMap);
            if (seed)
                _this.replace(seed);
            return _this;
        }
        Root.prototype.addLayer = function (layerId, replay) {
            // Adding an optimistic Layer on top of the Root actually adds the Layer
            // on top of the Stump, so the Stump always comes between the Root and
            // any Layer objects that we've added.
            return this.stump.addLayer(layerId, replay);
        };
        Root.prototype.removeLayer = function () {
            // Never remove the root layer.
            return this;
        };
        // Field storage objects live in one trie owned by the Root, shared by all layers.
        Root.prototype.getStorage = function () {
            return this.storageTrie.lookupArray(arguments);
        };
        return Root;
    }(EntityStore));
    EntityStore.Root = Root;
})(EntityStore || (EntityStore = {}));
||||
// Not exported, since all Layer instances are created by the addLayer method
// of the EntityStore.Root class.
var Layer = /** @class */ (function (_super) {
    __extends(Layer, _super);
    function Layer(id, parent, replay, group) {
        var _this = _super.call(this, parent.policies, group) || this;
        _this.id = id;
        _this.parent = parent;
        _this.replay = replay;
        _this.group = group;
        // Re-apply this layer's writes immediately upon construction.
        replay(_this);
        return _this;
    }
    // New layers stack on top of this one, sharing the same CacheGroup.
    Layer.prototype.addLayer = function (layerId, replay) {
        return new Layer(layerId, this, replay, this.group);
    };
    Layer.prototype.removeLayer = function (layerId) {
        var _this = this;
        // Remove all instances of the given id, not just the first one.
        var parent = this.parent.removeLayer(layerId);
        if (layerId === this.id) {
            if (this.group.caching) {
                // Dirty every ID we're removing. Technically we might be able to avoid
                // dirtying fields that have values in higher layers, but we don't have
                // easy access to higher layers here, and we're about to recreate those
                // layers anyway (see parent.addLayer below).
                Object.keys(this.data).forEach(function (dataId) {
                    var ownStoreObject = _this.data[dataId];
                    var parentStoreObject = parent["lookup"](dataId);
                    if (!parentStoreObject) {
                        // The StoreObject identified by dataId was defined in this layer
                        // but will be undefined in the parent layer, so we can delete the
                        // whole entity using this.delete(dataId). Since we're about to
                        // throw this layer away, the only goal of this deletion is to dirty
                        // the removed fields.
                        _this.delete(dataId);
                    }
                    else if (!ownStoreObject) {
                        // This layer had an entry for dataId but it was undefined, which
                        // means the entity was deleted in this layer, and it's about to
                        // become undeleted when we remove this layer, so we need to dirty
                        // all fields that are about to be reexposed.
                        _this.group.dirty(dataId, "__exists");
                        Object.keys(parentStoreObject).forEach(function (storeFieldName) {
                            _this.group.dirty(dataId, storeFieldName);
                        });
                    }
                    else if (ownStoreObject !== parentStoreObject) {
                        // If ownStoreObject is not exactly the same as parentStoreObject,
                        // dirty any fields whose values will change as a result of this
                        // removal.
                        Object.keys(ownStoreObject).forEach(function (storeFieldName) {
                            if (!equal(ownStoreObject[storeFieldName], parentStoreObject[storeFieldName])) {
                                _this.group.dirty(dataId, storeFieldName);
                            }
                        });
                    }
                });
            }
            return parent;
        }
        // No changes are necessary if the parent chain remains identical.
        if (parent === this.parent)
            return this;
        // Recreate this layer on top of the new parent.
        return parent.addLayer(this.id, this.replay);
    };
    // Snapshot combining parent data with this layer's overrides.
    Layer.prototype.toObject = function () {
        return __assign(__assign({}, this.parent.toObject()), this.data);
    };
    // Child references are the union of the parent's and (when this layer has
    // its own entry for dataId) this layer's own.
    Layer.prototype.findChildRefIds = function (dataId) {
        var fromParent = this.parent.findChildRefIds(dataId);
        return hasOwn.call(this.data, dataId) ? __assign(__assign({}, fromParent), _super.prototype.findChildRefIds.call(this, dataId)) : fromParent;
    };
    // Delegates to the Root (the bottom of the parent chain), which owns the
    // single storage trie.
    Layer.prototype.getStorage = function () {
        var p = this.parent;
        while (p.parent)
            p = p.parent;
        return p.getStorage.apply(p,
        // @ts-expect-error
        arguments);
    };
    return Layer;
}(EntityStore));
||||
// Represents a Layer permanently installed just above the Root, which allows
// reading optimistically (and registering optimistic dependencies) even when
// no optimistic layers are currently active. The stump.group CacheGroup object
// is shared by any/all Layer objects added on top of the Stump.
var Stump = /** @class */ (function (_super) {
    __extends(Stump, _super);
    function Stump(root) {
        // The Stump's CacheGroup chains to the Root's group so optimistic
        // dependencies propagate downward.
        return _super.call(this, "EntityStore.Stump", root, function () { }, new CacheGroup(root.group.caching, root.group)) || this;
    }
    Stump.prototype.removeLayer = function () {
        // Never remove the Stump layer.
        return this;
    };
    Stump.prototype.merge = function (older, newer) {
        // We never want to write any data into the Stump, so we forward any merge
        // calls to the Root instead. Another option here would be to throw an
        // exception, but the toReference(object, true) function can sometimes
        // trigger Stump writes (which used to be Root writes, before the Stump
        // concept was introduced).
        return this.parent.merge(older, newer);
    };
    return Stump;
}(Layer));
||||
// DeepMerger reconciler for colliding StoreObject properties.
// Wherever there is a key collision, prefer the incoming value, unless it is
// deeply equal to the existing value. It's worth checking deep equality here
// (even though blindly returning incoming would be logically correct) because
// preserving the referential identity of existing data can prevent needless
// rereading and rerendering.
function storeObjectReconciler(existingObject, incomingObject, property) {
    var existingValue = existingObject[property];
    var incomingValue = incomingObject[property];
    if (equal(existingValue, incomingValue)) {
        return existingValue;
    }
    return incomingValue;
}
||||
// True when `store` is an EntityStore whose CacheGroup has result caching
// enabled. When result caching is disabled, store.group.caching is false
// (and the group's internal dep function is null).
export function supportsResultCaching(store) {
    if (!(store instanceof EntityStore)) {
        return false;
    }
    return !!store.group.caching;
}
||||
//# sourceMappingURL=entityStore.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/entityStore.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/entityStore.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.d.ts
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
//# sourceMappingURL=fixPolyfills.d.ts.map
|
||||
10
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.js
generated
vendored
Normal file
10
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
"use strict";
|
||||
// Most JavaScript environments do not need the workarounds implemented in
|
||||
// fixPolyfills.native.ts, so importing fixPolyfills.ts merely imports
|
||||
// this empty module, adding nothing to bundle sizes or execution times.
|
||||
// When bundling for React Native, we substitute fixPolyfills.native.js
|
||||
// for fixPolyfills.js (see the "react-native" section of package.json),
|
||||
// to work around problems with Map and Set polyfills in older versions of
|
||||
// React Native (which should have been fixed in react-native@0.59.0):
|
||||
// https://github.com/apollographql/apollo-client/pull/5962
|
||||
//# sourceMappingURL=fixPolyfills.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"fixPolyfills.js","sourceRoot":"","sources":["../../../src/cache/inmemory/fixPolyfills.ts"],"names":[],"mappings":";AAAA,0EAA0E;AAC1E,sEAAsE;AACtE,wEAAwE;AACxE,uEAAuE;AACvE,wEAAwE;AACxE,0EAA0E;AAC1E,sEAAsE;AACtE,2DAA2D","sourcesContent":["// Most JavaScript environments do not need the workarounds implemented in\n// fixPolyfills.native.ts, so importing fixPolyfills.ts merely imports\n// this empty module, adding nothing to bundle sizes or execution times.\n// When bundling for React Native, we substitute fixPolyfills.native.js\n// for fixPolyfills.js (see the \"react-native\" section of package.json),\n// to work around problems with Map and Set polyfills in older versions of\n// React Native (which should have been fixed in react-native@0.59.0):\n// https://github.com/apollographql/apollo-client/pull/5962\n"]}
|
||||
2
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.d.ts
generated
vendored
Normal file
2
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
export {};
|
||||
//# sourceMappingURL=fixPolyfills.native.d.ts.map
|
||||
61
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.js
generated
vendored
Normal file
61
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.js
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
// Make sure Map.prototype.set returns the Map instance, per spec.
|
||||
// https://github.com/apollographql/apollo-client/issues/4024
|
||||
var testMap = new Map();
|
||||
if (testMap.set(1, 2) !== testMap) {
|
||||
var set_1 = testMap.set;
|
||||
Map.prototype.set = function () {
|
||||
var args = [];
|
||||
for (var _i = 0; _i < arguments.length; _i++) {
|
||||
args[_i] = arguments[_i];
|
||||
}
|
||||
set_1.apply(this, args);
|
||||
return this;
|
||||
};
|
||||
}
|
||||
// Make sure Set.prototype.add returns the Set instance, per spec.
|
||||
var testSet = new Set();
|
||||
if (testSet.add(3) !== testSet) {
|
||||
var add_1 = testSet.add;
|
||||
Set.prototype.add = function () {
|
||||
var args = [];
|
||||
for (var _i = 0; _i < arguments.length; _i++) {
|
||||
args[_i] = arguments[_i];
|
||||
}
|
||||
add_1.apply(this, args);
|
||||
return this;
|
||||
};
|
||||
}
|
||||
var frozen = {};
|
||||
if (typeof Object.freeze === "function") {
|
||||
Object.freeze(frozen);
|
||||
}
|
||||
try {
|
||||
// If non-extensible objects can't be stored as keys in a Map, make sure we
|
||||
// do not freeze/seal/etc. an object without first attempting to put it in a
|
||||
// Map. For example, this gives the React Native Map polyfill a chance to tag
|
||||
// objects before they become non-extensible:
|
||||
// https://github.com/facebook/react-native/blob/98a6f19d7c/Libraries/vendor/core/Map.js#L44-L50
|
||||
// https://github.com/apollographql/react-apollo/issues/2442#issuecomment-426489517
|
||||
testMap.set(frozen, frozen).delete(frozen);
|
||||
}
|
||||
catch (_a) {
|
||||
var wrap = function (method) {
|
||||
return (method &&
|
||||
(function (obj) {
|
||||
try {
|
||||
// If .set succeeds, also call .delete to avoid leaking memory.
|
||||
testMap.set(obj, obj).delete(obj);
|
||||
}
|
||||
finally {
|
||||
// If .set or .delete fails, the exception will be silently swallowed
|
||||
// by this return-from-finally statement:
|
||||
return method.call(Object, obj);
|
||||
}
|
||||
}));
|
||||
};
|
||||
Object.freeze = wrap(Object.freeze);
|
||||
Object.seal = wrap(Object.seal);
|
||||
Object.preventExtensions = wrap(Object.preventExtensions);
|
||||
}
|
||||
export {};
|
||||
//# sourceMappingURL=fixPolyfills.native.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fixPolyfills.native.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"fixPolyfills.native.js","sourceRoot":"","sources":["../../../src/cache/inmemory/fixPolyfills.native.ts"],"names":[],"mappings":"AAAA,kEAAkE;AAClE,6DAA6D;AAC7D,IAAM,OAAO,GAAG,IAAI,GAAG,EAAE,CAAC;AAC1B,IAAI,OAAO,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,KAAK,OAAO,EAAE,CAAC;IAC1B,IAAA,KAAG,GAAK,OAAO,IAAZ,CAAa;IACxB,GAAG,CAAC,SAAS,CAAC,GAAG,GAAG;QAAU,cAAO;aAAP,UAAO,EAAP,qBAAO,EAAP,IAAO;YAAP,yBAAO;;QACnC,KAAG,CAAC,KAAK,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;QACtB,OAAO,IAAI,CAAC;IACd,CAAC,CAAC;AACJ,CAAC;AAED,kEAAkE;AAClE,IAAM,OAAO,GAAG,IAAI,GAAG,EAAE,CAAC;AAC1B,IAAI,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,KAAK,OAAO,EAAE,CAAC;IACvB,IAAA,KAAG,GAAK,OAAO,IAAZ,CAAa;IACxB,GAAG,CAAC,SAAS,CAAC,GAAG,GAAG;QAAU,cAAO;aAAP,UAAO,EAAP,qBAAO,EAAP,IAAO;YAAP,yBAAO;;QACnC,KAAG,CAAC,KAAK,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;QACtB,OAAO,IAAI,CAAC;IACd,CAAC,CAAC;AACJ,CAAC;AAED,IAAM,MAAM,GAAG,EAAE,CAAC;AAClB,IAAI,OAAO,MAAM,CAAC,MAAM,KAAK,UAAU,EAAE,CAAC;IACxC,MAAM,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;AACxB,CAAC;AAED,IAAI,CAAC;IACH,2EAA2E;IAC3E,4EAA4E;IAC5E,6EAA6E;IAC7E,6CAA6C;IAC7C,gGAAgG;IAChG,mFAAmF;IACnF,OAAO,CAAC,GAAG,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;AAC7C,CAAC;AAAC,WAAM,CAAC;IACP,IAAM,IAAI,GAAG,UAA6B,MAAS;QACjD,OAAO,CACL,MAAM;YACL,CAAC,UAAC,GAAG;gBACJ,IAAI,CAAC;oBACH,+DAA+D;oBAC/D,OAAO,CAAC,GAAG,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC;gBACpC,CAAC;wBAAS,CAAC;oBACT,qEAAqE;oBACrE,yCAAyC;oBACzC,OAAO,MAAM,CAAC,IAAI,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;gBAClC,CAAC;YACH,CAAC,CAAO,CACT,CAAC;IACJ,CAAC,CAAC;IACF,MAAM,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;IACpC,MAAM,CAAC,IAAI,GAAG,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC;IAChC,MAAM,CAAC,iBAAiB,GAAG,IAAI,CAAC,MAAM,CAAC,iBAAiB,CAAC,CAAC;AAC5D,CAAC","sourcesContent":["// Make sure Map.prototype.set returns the Map instance, per spec.\n// https://github.com/apollographql/apollo-client/issues/4024\nconst testMap = new Map();\nif (testMap.set(1, 2) !== testMap) {\n const { set } = testMap;\n Map.prototype.set = function (...args) {\n 
set.apply(this, args);\n return this;\n };\n}\n\n// Make sure Set.prototype.add returns the Set instance, per spec.\nconst testSet = new Set();\nif (testSet.add(3) !== testSet) {\n const { add } = testSet;\n Set.prototype.add = function (...args) {\n add.apply(this, args);\n return this;\n };\n}\n\nconst frozen = {};\nif (typeof Object.freeze === \"function\") {\n Object.freeze(frozen);\n}\n\ntry {\n // If non-extensible objects can't be stored as keys in a Map, make sure we\n // do not freeze/seal/etc. an object without first attempting to put it in a\n // Map. For example, this gives the React Native Map polyfill a chance to tag\n // objects before they become non-extensible:\n // https://github.com/facebook/react-native/blob/98a6f19d7c/Libraries/vendor/core/Map.js#L44-L50\n // https://github.com/apollographql/react-apollo/issues/2442#issuecomment-426489517\n testMap.set(frozen, frozen).delete(frozen);\n} catch {\n const wrap = <M extends <T>(obj: T) => T>(method: M): M => {\n return (\n method &&\n (((obj) => {\n try {\n // If .set succeeds, also call .delete to avoid leaking memory.\n testMap.set(obj, obj).delete(obj);\n } finally {\n // If .set or .delete fails, the exception will be silently swallowed\n // by this return-from-finally statement:\n return method.call(Object, obj);\n }\n }) as M)\n );\n };\n Object.freeze = wrap(Object.freeze);\n Object.seal = wrap(Object.seal);\n Object.preventExtensions = wrap(Object.preventExtensions);\n}\n\nexport {};\n"]}
|
||||
9
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fragmentRegistry.d.ts
generated
vendored
Normal file
9
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fragmentRegistry.d.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
import type { DocumentNode, FragmentDefinitionNode } from "graphql";
|
||||
export interface FragmentRegistryAPI {
|
||||
register(...fragments: DocumentNode[]): this;
|
||||
lookup(fragmentName: string): FragmentDefinitionNode | null;
|
||||
transform<D extends DocumentNode>(document: D): D;
|
||||
resetCaches(): void;
|
||||
}
|
||||
export declare function createFragmentRegistry(...fragments: DocumentNode[]): FragmentRegistryAPI;
|
||||
//# sourceMappingURL=fragmentRegistry.d.ts.map
|
||||
140
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fragmentRegistry.js
generated
vendored
Normal file
140
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fragmentRegistry.js
generated
vendored
Normal file
@@ -0,0 +1,140 @@
|
||||
import { __assign, __spreadArray } from "tslib";
|
||||
import { visit } from "graphql";
|
||||
import { wrap } from "optimism";
|
||||
import { cacheSizes, getFragmentDefinitions, } from "../../utilities/index.js";
|
||||
import { WeakCache } from "@wry/caches";
|
||||
// As long as createFragmentRegistry is not imported or used, the
|
||||
// FragmentRegistry example implementation provided below should not be bundled
|
||||
// (by tree-shaking bundlers like Rollup), because the implementation of
|
||||
// InMemoryCache refers only to the TypeScript interface FragmentRegistryAPI,
|
||||
// never the concrete implementation FragmentRegistry (which is deliberately not
|
||||
// exported from this module).
|
||||
export function createFragmentRegistry() {
|
||||
var fragments = [];
|
||||
for (var _i = 0; _i < arguments.length; _i++) {
|
||||
fragments[_i] = arguments[_i];
|
||||
}
|
||||
return new (FragmentRegistry.bind.apply(FragmentRegistry, __spreadArray([void 0], fragments, false)))();
|
||||
}
|
||||
var FragmentRegistry = /** @class */ (function () {
|
||||
// Call `createFragmentRegistry` instead of invoking the
|
||||
// FragmentRegistry constructor directly. This reserves the constructor for
|
||||
// future configuration of the FragmentRegistry.
|
||||
function FragmentRegistry() {
|
||||
var fragments = [];
|
||||
for (var _i = 0; _i < arguments.length; _i++) {
|
||||
fragments[_i] = arguments[_i];
|
||||
}
|
||||
this.registry = Object.create(null);
|
||||
this.resetCaches();
|
||||
if (fragments.length) {
|
||||
this.register.apply(this, fragments);
|
||||
}
|
||||
}
|
||||
FragmentRegistry.prototype.register = function () {
|
||||
var _this = this;
|
||||
var fragments = [];
|
||||
for (var _i = 0; _i < arguments.length; _i++) {
|
||||
fragments[_i] = arguments[_i];
|
||||
}
|
||||
var definitions = new Map();
|
||||
fragments.forEach(function (doc) {
|
||||
getFragmentDefinitions(doc).forEach(function (node) {
|
||||
definitions.set(node.name.value, node);
|
||||
});
|
||||
});
|
||||
definitions.forEach(function (node, name) {
|
||||
if (node !== _this.registry[name]) {
|
||||
_this.registry[name] = node;
|
||||
_this.invalidate(name);
|
||||
}
|
||||
});
|
||||
return this;
|
||||
};
|
||||
// Overridden in the resetCaches method below.
|
||||
FragmentRegistry.prototype.invalidate = function (name) { };
|
||||
FragmentRegistry.prototype.resetCaches = function () {
|
||||
var proto = FragmentRegistry.prototype;
|
||||
this.invalidate = (this.lookup = wrap(proto.lookup.bind(this), {
|
||||
makeCacheKey: function (arg) { return arg; },
|
||||
max: cacheSizes["fragmentRegistry.lookup"] ||
|
||||
1000 /* defaultCacheSizes["fragmentRegistry.lookup"] */,
|
||||
})).dirty; // This dirty function is bound to the wrapped lookup method.
|
||||
this.transform = wrap(proto.transform.bind(this), {
|
||||
cache: WeakCache,
|
||||
max: cacheSizes["fragmentRegistry.transform"] ||
|
||||
2000 /* defaultCacheSizes["fragmentRegistry.transform"] */,
|
||||
});
|
||||
this.findFragmentSpreads = wrap(proto.findFragmentSpreads.bind(this), {
|
||||
cache: WeakCache,
|
||||
max: cacheSizes["fragmentRegistry.findFragmentSpreads"] ||
|
||||
4000 /* defaultCacheSizes["fragmentRegistry.findFragmentSpreads"] */,
|
||||
});
|
||||
};
|
||||
/*
|
||||
* Note:
|
||||
* This method is only memoized so it can serve as a dependency to `tranform`,
|
||||
* so calling `invalidate` will invalidate cache entries for `transform`.
|
||||
*/
|
||||
FragmentRegistry.prototype.lookup = function (fragmentName) {
|
||||
return this.registry[fragmentName] || null;
|
||||
};
|
||||
FragmentRegistry.prototype.transform = function (document) {
|
||||
var _this = this;
|
||||
var defined = new Map();
|
||||
getFragmentDefinitions(document).forEach(function (def) {
|
||||
defined.set(def.name.value, def);
|
||||
});
|
||||
var unbound = new Set();
|
||||
var enqueue = function (spreadName) {
|
||||
if (!defined.has(spreadName)) {
|
||||
unbound.add(spreadName);
|
||||
}
|
||||
};
|
||||
var enqueueChildSpreads = function (node) {
|
||||
return Object.keys(_this.findFragmentSpreads(node)).forEach(enqueue);
|
||||
};
|
||||
enqueueChildSpreads(document);
|
||||
var missing = [];
|
||||
var map = Object.create(null);
|
||||
// This Set forEach loop can be extended during iteration by adding
|
||||
// additional strings to the unbound set.
|
||||
unbound.forEach(function (fragmentName) {
|
||||
var knownFragmentDef = defined.get(fragmentName);
|
||||
if (knownFragmentDef) {
|
||||
enqueueChildSpreads((map[fragmentName] = knownFragmentDef));
|
||||
}
|
||||
else {
|
||||
missing.push(fragmentName);
|
||||
var def = _this.lookup(fragmentName);
|
||||
if (def) {
|
||||
enqueueChildSpreads((map[fragmentName] = def));
|
||||
}
|
||||
}
|
||||
});
|
||||
if (missing.length) {
|
||||
var defsToAppend_1 = [];
|
||||
missing.forEach(function (name) {
|
||||
var def = map[name];
|
||||
if (def) {
|
||||
defsToAppend_1.push(def);
|
||||
}
|
||||
});
|
||||
if (defsToAppend_1.length) {
|
||||
document = __assign(__assign({}, document), { definitions: document.definitions.concat(defsToAppend_1) });
|
||||
}
|
||||
}
|
||||
return document;
|
||||
};
|
||||
FragmentRegistry.prototype.findFragmentSpreads = function (root) {
|
||||
var spreads = Object.create(null);
|
||||
visit(root, {
|
||||
FragmentSpread: function (node) {
|
||||
spreads[node.name.value] = node;
|
||||
},
|
||||
});
|
||||
return spreads;
|
||||
};
|
||||
return FragmentRegistry;
|
||||
}());
|
||||
//# sourceMappingURL=fragmentRegistry.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fragmentRegistry.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/fragmentRegistry.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
28
graphql-subscription/node_modules/@apollo/client/cache/inmemory/helpers.d.ts
generated
vendored
Normal file
28
graphql-subscription/node_modules/@apollo/client/cache/inmemory/helpers.d.ts
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
import type { DocumentNode, SelectionSetNode } from "graphql";
|
||||
import type { NormalizedCache, InMemoryCacheConfig } from "./types.js";
|
||||
import type { KeyFieldsContext } from "./policies.js";
|
||||
import type { FragmentRegistryAPI } from "./fragmentRegistry.js";
|
||||
import type { Reference, StoreValue, StoreObject, FragmentMap, FragmentMapFunction } from "../../utilities/index.js";
|
||||
import { DeepMerger, isArray } from "../../utilities/index.js";
|
||||
export declare const hasOwn: (v: PropertyKey) => boolean;
|
||||
export declare function isNullish(value: any): value is null | undefined;
|
||||
export { isArray };
|
||||
export declare function defaultDataIdFromObject({ __typename, id, _id }: Readonly<StoreObject>, context?: KeyFieldsContext): string | undefined;
|
||||
export declare function normalizeConfig(config: InMemoryCacheConfig): {
|
||||
dataIdFromObject: typeof defaultDataIdFromObject;
|
||||
addTypename: boolean;
|
||||
resultCaching: boolean;
|
||||
canonizeResults: boolean;
|
||||
} & InMemoryCacheConfig;
|
||||
export declare function shouldCanonizeResults(config: Pick<InMemoryCacheConfig, "canonizeResults">): boolean;
|
||||
export declare function getTypenameFromStoreObject(store: NormalizedCache, objectOrReference: StoreObject | Reference): string | undefined;
|
||||
export declare const TypeOrFieldNameRegExp: RegExp;
|
||||
export declare function fieldNameFromStoreName(storeFieldName: string): string;
|
||||
export declare function selectionSetMatchesResult(selectionSet: SelectionSetNode, result: Record<string, any>, variables?: Record<string, any>): boolean;
|
||||
export declare function storeValueIsStoreObject(value: StoreValue): value is StoreObject;
|
||||
export declare function makeProcessedFieldsMerger(): DeepMerger<any[]>;
|
||||
export declare function extractFragmentContext(document: DocumentNode, fragments?: FragmentRegistryAPI): {
|
||||
fragmentMap: FragmentMap;
|
||||
lookupFragment: FragmentMapFunction;
|
||||
};
|
||||
//# sourceMappingURL=helpers.d.ts.map
|
||||
96
graphql-subscription/node_modules/@apollo/client/cache/inmemory/helpers.js
generated
vendored
Normal file
96
graphql-subscription/node_modules/@apollo/client/cache/inmemory/helpers.js
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
import { isReference, isField, DeepMerger, resultKeyNameFromField, shouldInclude, isNonNullObject, compact, createFragmentMap, getFragmentDefinitions, isArray, } from "../../utilities/index.js";
|
||||
export var hasOwn = Object.prototype.hasOwnProperty;
|
||||
export function isNullish(value) {
|
||||
return value === null || value === void 0;
|
||||
}
|
||||
export { isArray };
|
||||
export function defaultDataIdFromObject(_a, context) {
|
||||
var __typename = _a.__typename, id = _a.id, _id = _a._id;
|
||||
if (typeof __typename === "string") {
|
||||
if (context) {
|
||||
context.keyObject =
|
||||
!isNullish(id) ? { id: id }
|
||||
: !isNullish(_id) ? { _id: _id }
|
||||
: void 0;
|
||||
}
|
||||
// If there is no object.id, fall back to object._id.
|
||||
if (isNullish(id) && !isNullish(_id)) {
|
||||
id = _id;
|
||||
}
|
||||
if (!isNullish(id)) {
|
||||
return "".concat(__typename, ":").concat(typeof id === "number" || typeof id === "string" ?
|
||||
id
|
||||
: JSON.stringify(id));
|
||||
}
|
||||
}
|
||||
}
|
||||
var defaultConfig = {
|
||||
dataIdFromObject: defaultDataIdFromObject,
|
||||
addTypename: true,
|
||||
resultCaching: true,
|
||||
// Thanks to the shouldCanonizeResults helper, this should be the only line
|
||||
// you have to change to reenable canonization by default in the future.
|
||||
canonizeResults: false,
|
||||
};
|
||||
export function normalizeConfig(config) {
|
||||
return compact(defaultConfig, config);
|
||||
}
|
||||
export function shouldCanonizeResults(config) {
|
||||
var value = config.canonizeResults;
|
||||
return value === void 0 ? defaultConfig.canonizeResults : value;
|
||||
}
|
||||
export function getTypenameFromStoreObject(store, objectOrReference) {
|
||||
return isReference(objectOrReference) ?
|
||||
store.get(objectOrReference.__ref, "__typename")
|
||||
: objectOrReference && objectOrReference.__typename;
|
||||
}
|
||||
export var TypeOrFieldNameRegExp = /^[_a-z][_0-9a-z]*/i;
|
||||
export function fieldNameFromStoreName(storeFieldName) {
|
||||
var match = storeFieldName.match(TypeOrFieldNameRegExp);
|
||||
return match ? match[0] : storeFieldName;
|
||||
}
|
||||
export function selectionSetMatchesResult(selectionSet, result, variables) {
|
||||
if (isNonNullObject(result)) {
|
||||
return isArray(result) ?
|
||||
result.every(function (item) {
|
||||
return selectionSetMatchesResult(selectionSet, item, variables);
|
||||
})
|
||||
: selectionSet.selections.every(function (field) {
|
||||
if (isField(field) && shouldInclude(field, variables)) {
|
||||
var key = resultKeyNameFromField(field);
|
||||
return (hasOwn.call(result, key) &&
|
||||
(!field.selectionSet ||
|
||||
selectionSetMatchesResult(field.selectionSet, result[key], variables)));
|
||||
}
|
||||
// If the selection has been skipped with @skip(true) or
|
||||
// @include(false), it should not count against the matching. If
|
||||
// the selection is not a field, it must be a fragment (inline or
|
||||
// named). We will determine if selectionSetMatchesResult for that
|
||||
// fragment when we get to it, so for now we return true.
|
||||
return true;
|
||||
});
|
||||
}
|
||||
return false;
|
||||
}
|
||||
export function storeValueIsStoreObject(value) {
|
||||
return isNonNullObject(value) && !isReference(value) && !isArray(value);
|
||||
}
|
||||
export function makeProcessedFieldsMerger() {
|
||||
return new DeepMerger();
|
||||
}
|
||||
export function extractFragmentContext(document, fragments) {
|
||||
// FragmentMap consisting only of fragments defined directly in document, not
|
||||
// including other fragments registered in the FragmentRegistry.
|
||||
var fragmentMap = createFragmentMap(getFragmentDefinitions(document));
|
||||
return {
|
||||
fragmentMap: fragmentMap,
|
||||
lookupFragment: function (name) {
|
||||
var def = fragmentMap[name];
|
||||
if (!def && fragments) {
|
||||
def = fragments.lookup(name);
|
||||
}
|
||||
return def || null;
|
||||
},
|
||||
};
|
||||
}
|
||||
//# sourceMappingURL=helpers.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/helpers.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/helpers.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
63
graphql-subscription/node_modules/@apollo/client/cache/inmemory/inMemoryCache.d.ts
generated
vendored
Normal file
63
graphql-subscription/node_modules/@apollo/client/cache/inmemory/inMemoryCache.d.ts
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
import "./fixPolyfills.js";
|
||||
import type { DocumentNode } from "graphql";
|
||||
import { ApolloCache } from "../core/cache.js";
|
||||
import type { Cache } from "../core/types/Cache.js";
|
||||
import type { StoreObject, Reference } from "../../utilities/index.js";
|
||||
import type { InMemoryCacheConfig, NormalizedCacheObject } from "./types.js";
|
||||
import { makeVar } from "./reactiveVars.js";
|
||||
import { Policies } from "./policies.js";
|
||||
import type { OperationVariables } from "../../core/index.js";
|
||||
import { getInMemoryCacheMemoryInternals } from "../../utilities/caching/getMemoryInternals.js";
|
||||
type BroadcastOptions = Pick<Cache.BatchOptions<InMemoryCache>, "optimistic" | "onWatchUpdated">;
|
||||
export declare class InMemoryCache extends ApolloCache<NormalizedCacheObject> {
|
||||
private data;
|
||||
private optimisticData;
|
||||
protected config: InMemoryCacheConfig;
|
||||
private watches;
|
||||
private addTypename;
|
||||
private storeReader;
|
||||
private storeWriter;
|
||||
private addTypenameTransform;
|
||||
private maybeBroadcastWatch;
|
||||
readonly assumeImmutableResults = true;
|
||||
readonly policies: Policies;
|
||||
readonly makeVar: typeof makeVar;
|
||||
constructor(config?: InMemoryCacheConfig);
|
||||
private init;
|
||||
private resetResultCache;
|
||||
restore(data: NormalizedCacheObject): this;
|
||||
extract(optimistic?: boolean): NormalizedCacheObject;
|
||||
read<T>(options: Cache.ReadOptions): T | null;
|
||||
write(options: Cache.WriteOptions): Reference | undefined;
|
||||
modify<Entity extends Record<string, any> = Record<string, any>>(options: Cache.ModifyOptions<Entity>): boolean;
|
||||
diff<TData, TVariables extends OperationVariables = any>(options: Cache.DiffOptions<TData, TVariables>): Cache.DiffResult<TData>;
|
||||
watch<TData = any, TVariables = any>(watch: Cache.WatchOptions<TData, TVariables>): () => void;
|
||||
gc(options?: {
|
||||
resetResultCache?: boolean;
|
||||
resetResultIdentities?: boolean;
|
||||
}): string[];
|
||||
retain(rootId: string, optimistic?: boolean): number;
|
||||
release(rootId: string, optimistic?: boolean): number;
|
||||
identify(object: StoreObject | Reference): string | undefined;
|
||||
evict(options: Cache.EvictOptions): boolean;
|
||||
reset(options?: Cache.ResetOptions): Promise<void>;
|
||||
removeOptimistic(idToRemove: string): void;
|
||||
private txCount;
|
||||
batch<TUpdateResult>(options: Cache.BatchOptions<InMemoryCache, TUpdateResult>): TUpdateResult;
|
||||
performTransaction(update: (cache: InMemoryCache) => any, optimisticId?: string | null): any;
|
||||
transformDocument(document: DocumentNode): DocumentNode;
|
||||
protected broadcastWatches(options?: BroadcastOptions): void;
|
||||
private addFragmentsToDocument;
|
||||
private addTypenameToDocument;
|
||||
private broadcastWatch;
|
||||
/**
|
||||
* @experimental
|
||||
* @internal
|
||||
* This is not a stable API - it is used in development builds to expose
|
||||
* information to the DevTools.
|
||||
* Use at your own risk!
|
||||
*/
|
||||
getMemoryInternals?: typeof getInMemoryCacheMemoryInternals;
|
||||
}
|
||||
export {};
|
||||
//# sourceMappingURL=inMemoryCache.d.ts.map
|
||||
472
graphql-subscription/node_modules/@apollo/client/cache/inmemory/inMemoryCache.js
generated
vendored
Normal file
472
graphql-subscription/node_modules/@apollo/client/cache/inmemory/inMemoryCache.js
generated
vendored
Normal file
@@ -0,0 +1,472 @@
|
||||
import { __assign, __extends } from "tslib";
|
||||
import { invariant } from "../../utilities/globals/index.js";
|
||||
// Make builtins like Map and Set safe to use with non-extensible objects.
|
||||
import "./fixPolyfills.js";
|
||||
import { wrap } from "optimism";
|
||||
import { equal } from "@wry/equality";
|
||||
import { ApolloCache } from "../core/cache.js";
|
||||
import { MissingFieldError } from "../core/types/common.js";
|
||||
import { addTypenameToDocument, isReference, DocumentTransform, canonicalStringify, print, cacheSizes, } from "../../utilities/index.js";
|
||||
import { StoreReader } from "./readFromStore.js";
|
||||
import { StoreWriter } from "./writeToStore.js";
|
||||
import { EntityStore, supportsResultCaching } from "./entityStore.js";
|
||||
import { makeVar, forgetCache, recallCache } from "./reactiveVars.js";
|
||||
import { Policies } from "./policies.js";
|
||||
import { hasOwn, normalizeConfig, shouldCanonizeResults } from "./helpers.js";
|
||||
import { getInMemoryCacheMemoryInternals } from "../../utilities/caching/getMemoryInternals.js";
|
||||
var InMemoryCache = /** @class */ (function (_super) {
|
||||
__extends(InMemoryCache, _super);
|
||||
function InMemoryCache(config) {
|
||||
if (config === void 0) { config = {}; }
|
||||
var _this = _super.call(this) || this;
|
||||
_this.watches = new Set();
|
||||
_this.addTypenameTransform = new DocumentTransform(addTypenameToDocument);
|
||||
// Override the default value, since InMemoryCache result objects are frozen
|
||||
// in development and expected to remain logically immutable in production.
|
||||
_this.assumeImmutableResults = true;
|
||||
_this.makeVar = makeVar;
|
||||
_this.txCount = 0;
|
||||
_this.config = normalizeConfig(config);
|
||||
_this.addTypename = !!_this.config.addTypename;
|
||||
_this.policies = new Policies({
|
||||
cache: _this,
|
||||
dataIdFromObject: _this.config.dataIdFromObject,
|
||||
possibleTypes: _this.config.possibleTypes,
|
||||
typePolicies: _this.config.typePolicies,
|
||||
});
|
||||
_this.init();
|
||||
return _this;
|
||||
}
|
||||
InMemoryCache.prototype.init = function () {
|
||||
// Passing { resultCaching: false } in the InMemoryCache constructor options
|
||||
// will completely disable dependency tracking, which will improve memory
|
||||
// usage but worsen the performance of repeated reads.
|
||||
var rootStore = (this.data = new EntityStore.Root({
|
||||
policies: this.policies,
|
||||
resultCaching: this.config.resultCaching,
|
||||
}));
|
||||
// When no optimistic writes are currently active, cache.optimisticData ===
|
||||
// cache.data, so there are no additional layers on top of the actual data.
|
||||
// When an optimistic update happens, this.optimisticData will become a
|
||||
// linked list of EntityStore Layer objects that terminates with the
|
||||
// original this.data cache object.
|
||||
this.optimisticData = rootStore.stump;
|
||||
this.resetResultCache();
|
||||
};
|
||||
InMemoryCache.prototype.resetResultCache = function (resetResultIdentities) {
|
||||
var _this = this;
|
||||
var previousReader = this.storeReader;
|
||||
var fragments = this.config.fragments;
|
||||
// The StoreWriter is mostly stateless and so doesn't really need to be
|
||||
// reset, but it does need to have its writer.storeReader reference updated,
|
||||
// so it's simpler to update this.storeWriter as well.
|
||||
this.storeWriter = new StoreWriter(this, (this.storeReader = new StoreReader({
|
||||
cache: this,
|
||||
addTypename: this.addTypename,
|
||||
resultCacheMaxSize: this.config.resultCacheMaxSize,
|
||||
canonizeResults: shouldCanonizeResults(this.config),
|
||||
canon: resetResultIdentities ? void 0 : (previousReader && previousReader.canon),
|
||||
fragments: fragments,
|
||||
})), fragments);
|
||||
this.maybeBroadcastWatch = wrap(function (c, options) {
|
||||
return _this.broadcastWatch(c, options);
|
||||
}, {
|
||||
max: this.config.resultCacheMaxSize ||
|
||||
cacheSizes["inMemoryCache.maybeBroadcastWatch"] ||
|
||||
5000 /* defaultCacheSizes["inMemoryCache.maybeBroadcastWatch"] */,
|
||||
makeCacheKey: function (c) {
|
||||
// Return a cache key (thus enabling result caching) only if we're
|
||||
// currently using a data store that can track cache dependencies.
|
||||
var store = c.optimistic ? _this.optimisticData : _this.data;
|
||||
if (supportsResultCaching(store)) {
|
||||
var optimistic = c.optimistic, id = c.id, variables = c.variables;
|
||||
return store.makeCacheKey(c.query,
|
||||
// Different watches can have the same query, optimistic
|
||||
// status, rootId, and variables, but if their callbacks are
|
||||
// different, the (identical) result needs to be delivered to
|
||||
// each distinct callback. The easiest way to achieve that
|
||||
// separation is to include c.callback in the cache key for
|
||||
// maybeBroadcastWatch calls. See issue #5733.
|
||||
c.callback, canonicalStringify({ optimistic: optimistic, id: id, variables: variables }));
|
||||
}
|
||||
},
|
||||
});
|
||||
// Since we have thrown away all the cached functions that depend on the
|
||||
// CacheGroup dependencies maintained by EntityStore, we should also reset
|
||||
// all CacheGroup dependency information.
|
||||
new Set([this.data.group, this.optimisticData.group]).forEach(function (group) {
|
||||
return group.resetCaching();
|
||||
});
|
||||
};
|
||||
InMemoryCache.prototype.restore = function (data) {
|
||||
this.init();
|
||||
// Since calling this.init() discards/replaces the entire StoreReader, along
|
||||
// with the result caches it maintains, this.data.replace(data) won't have
|
||||
// to bother deleting the old data.
|
||||
if (data)
|
||||
this.data.replace(data);
|
||||
return this;
|
||||
};
|
||||
InMemoryCache.prototype.extract = function (optimistic) {
|
||||
if (optimistic === void 0) { optimistic = false; }
|
||||
return (optimistic ? this.optimisticData : this.data).extract();
|
||||
};
|
||||
InMemoryCache.prototype.read = function (options) {
|
||||
var
|
||||
// Since read returns data or null, without any additional metadata
|
||||
// about whether/where there might have been missing fields, the
|
||||
// default behavior cannot be returnPartialData = true (like it is
|
||||
// for the diff method), since defaulting to true would violate the
|
||||
// integrity of the T in the return type. However, partial data may
|
||||
// be useful in some cases, so returnPartialData:true may be
|
||||
// specified explicitly.
|
||||
_a = options.returnPartialData,
|
||||
// Since read returns data or null, without any additional metadata
|
||||
// about whether/where there might have been missing fields, the
|
||||
// default behavior cannot be returnPartialData = true (like it is
|
||||
// for the diff method), since defaulting to true would violate the
|
||||
// integrity of the T in the return type. However, partial data may
|
||||
// be useful in some cases, so returnPartialData:true may be
|
||||
// specified explicitly.
|
||||
returnPartialData = _a === void 0 ? false : _a;
|
||||
try {
|
||||
return (this.storeReader.diffQueryAgainstStore(__assign(__assign({}, options), { store: options.optimistic ? this.optimisticData : this.data, config: this.config, returnPartialData: returnPartialData })).result || null);
|
||||
}
|
||||
catch (e) {
|
||||
if (e instanceof MissingFieldError) {
|
||||
// Swallow MissingFieldError and return null, so callers do not need to
|
||||
// worry about catching "normal" exceptions resulting from incomplete
|
||||
// cache data. Unexpected errors will be re-thrown. If you need more
|
||||
// information about which fields were missing, use cache.diff instead,
|
||||
// and examine diffResult.missing.
|
||||
return null;
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
};
|
||||
InMemoryCache.prototype.write = function (options) {
|
||||
try {
|
||||
++this.txCount;
|
||||
return this.storeWriter.writeToStore(this.data, options);
|
||||
}
|
||||
finally {
|
||||
if (!--this.txCount && options.broadcast !== false) {
|
||||
this.broadcastWatches();
|
||||
}
|
||||
}
|
||||
};
|
||||
InMemoryCache.prototype.modify = function (options) {
|
||||
if (hasOwn.call(options, "id") && !options.id) {
|
||||
// To my knowledge, TypeScript does not currently provide a way to
|
||||
// enforce that an optional property?:type must *not* be undefined
|
||||
// when present. That ability would be useful here, because we want
|
||||
// options.id to default to ROOT_QUERY only when no options.id was
|
||||
// provided. If the caller attempts to pass options.id with a
|
||||
// falsy/undefined value (perhaps because cache.identify failed), we
|
||||
// should not assume the goal was to modify the ROOT_QUERY object.
|
||||
// We could throw, but it seems natural to return false to indicate
|
||||
// that nothing was modified.
|
||||
return false;
|
||||
}
|
||||
var store = ((options.optimistic) // Defaults to false.
|
||||
) ?
|
||||
this.optimisticData
|
||||
: this.data;
|
||||
try {
|
||||
++this.txCount;
|
||||
return store.modify(options.id || "ROOT_QUERY", options.fields);
|
||||
}
|
||||
finally {
|
||||
if (!--this.txCount && options.broadcast !== false) {
|
||||
this.broadcastWatches();
|
||||
}
|
||||
}
|
||||
};
|
||||
InMemoryCache.prototype.diff = function (options) {
|
||||
return this.storeReader.diffQueryAgainstStore(__assign(__assign({}, options), { store: options.optimistic ? this.optimisticData : this.data, rootId: options.id || "ROOT_QUERY", config: this.config }));
|
||||
};
|
||||
InMemoryCache.prototype.watch = function (watch) {
|
||||
var _this = this;
|
||||
if (!this.watches.size) {
|
||||
// In case we previously called forgetCache(this) because
|
||||
// this.watches became empty (see below), reattach this cache to any
|
||||
// reactive variables on which it previously depended. It might seem
|
||||
// paradoxical that we're able to recall something we supposedly
|
||||
// forgot, but the point of calling forgetCache(this) is to silence
|
||||
// useless broadcasts while this.watches is empty, and to allow the
|
||||
// cache to be garbage collected. If, however, we manage to call
|
||||
// recallCache(this) here, this cache object must not have been
|
||||
// garbage collected yet, and should resume receiving updates from
|
||||
// reactive variables, now that it has a watcher to notify.
|
||||
recallCache(this);
|
||||
}
|
||||
this.watches.add(watch);
|
||||
if (watch.immediate) {
|
||||
this.maybeBroadcastWatch(watch);
|
||||
}
|
||||
return function () {
|
||||
// Once we remove the last watch from this.watches, cache.broadcastWatches
|
||||
// no longer does anything, so we preemptively tell the reactive variable
|
||||
// system to exclude this cache from future broadcasts.
|
||||
if (_this.watches.delete(watch) && !_this.watches.size) {
|
||||
forgetCache(_this);
|
||||
}
|
||||
// Remove this watch from the LRU cache managed by the
|
||||
// maybeBroadcastWatch OptimisticWrapperFunction, to prevent memory
|
||||
// leaks involving the closure of watch.callback.
|
||||
_this.maybeBroadcastWatch.forget(watch);
|
||||
};
|
||||
};
|
||||
InMemoryCache.prototype.gc = function (options) {
|
||||
var _a;
|
||||
canonicalStringify.reset();
|
||||
print.reset();
|
||||
this.addTypenameTransform.resetCache();
|
||||
(_a = this.config.fragments) === null || _a === void 0 ? void 0 : _a.resetCaches();
|
||||
var ids = this.optimisticData.gc();
|
||||
if (options && !this.txCount) {
|
||||
if (options.resetResultCache) {
|
||||
this.resetResultCache(options.resetResultIdentities);
|
||||
}
|
||||
else if (options.resetResultIdentities) {
|
||||
this.storeReader.resetCanon();
|
||||
}
|
||||
}
|
||||
return ids;
|
||||
};
|
||||
// Call this method to ensure the given root ID remains in the cache after
|
||||
// garbage collection, along with its transitive child entities. Note that
|
||||
// the cache automatically retains all directly written entities. By default,
|
||||
// the retainment persists after optimistic updates are removed. Pass true
|
||||
// for the optimistic argument if you would prefer for the retainment to be
|
||||
// discarded when the top-most optimistic layer is removed. Returns the
|
||||
// resulting (non-negative) retainment count.
|
||||
InMemoryCache.prototype.retain = function (rootId, optimistic) {
|
||||
return (optimistic ? this.optimisticData : this.data).retain(rootId);
|
||||
};
|
||||
// Call this method to undo the effect of the retain method, above. Once the
|
||||
// retainment count falls to zero, the given ID will no longer be preserved
|
||||
// during garbage collection, though it may still be preserved by other safe
|
||||
// entities that refer to it. Returns the resulting (non-negative) retainment
|
||||
// count, in case that's useful.
|
||||
InMemoryCache.prototype.release = function (rootId, optimistic) {
|
||||
return (optimistic ? this.optimisticData : this.data).release(rootId);
|
||||
};
|
||||
// Returns the canonical ID for a given StoreObject, obeying typePolicies
|
||||
// and keyFields (and dataIdFromObject, if you still use that). At minimum,
|
||||
// the object must contain a __typename and any primary key fields required
|
||||
// to identify entities of that type. If you pass a query result object, be
|
||||
// sure that none of the primary key fields have been renamed by aliasing.
|
||||
// If you pass a Reference object, its __ref ID string will be returned.
|
||||
InMemoryCache.prototype.identify = function (object) {
|
||||
if (isReference(object))
|
||||
return object.__ref;
|
||||
try {
|
||||
return this.policies.identify(object)[0];
|
||||
}
|
||||
catch (e) {
|
||||
globalThis.__DEV__ !== false && invariant.warn(e);
|
||||
}
|
||||
};
|
||||
InMemoryCache.prototype.evict = function (options) {
|
||||
if (!options.id) {
|
||||
if (hasOwn.call(options, "id")) {
|
||||
// See comment in modify method about why we return false when
|
||||
// options.id exists but is falsy/undefined.
|
||||
return false;
|
||||
}
|
||||
options = __assign(__assign({}, options), { id: "ROOT_QUERY" });
|
||||
}
|
||||
try {
|
||||
// It's unlikely that the eviction will end up invoking any other
|
||||
// cache update operations while it's running, but {in,de}crementing
|
||||
// this.txCount still seems like a good idea, for uniformity with
|
||||
// the other update methods.
|
||||
++this.txCount;
|
||||
// Pass this.data as a limit on the depth of the eviction, so evictions
|
||||
// during optimistic updates (when this.data is temporarily set equal to
|
||||
// this.optimisticData) do not escape their optimistic Layer.
|
||||
return this.optimisticData.evict(options, this.data);
|
||||
}
|
||||
finally {
|
||||
if (!--this.txCount && options.broadcast !== false) {
|
||||
this.broadcastWatches();
|
||||
}
|
||||
}
|
||||
};
|
||||
InMemoryCache.prototype.reset = function (options) {
|
||||
var _this = this;
|
||||
this.init();
|
||||
canonicalStringify.reset();
|
||||
if (options && options.discardWatches) {
|
||||
// Similar to what happens in the unsubscribe function returned by
|
||||
// cache.watch, applied to all current watches.
|
||||
this.watches.forEach(function (watch) { return _this.maybeBroadcastWatch.forget(watch); });
|
||||
this.watches.clear();
|
||||
forgetCache(this);
|
||||
}
|
||||
else {
|
||||
// Calling this.init() above unblocks all maybeBroadcastWatch caching, so
|
||||
// this.broadcastWatches() triggers a broadcast to every current watcher
|
||||
// (letting them know their data is now missing). This default behavior is
|
||||
// convenient because it means the watches do not have to be manually
|
||||
// reestablished after resetting the cache. To prevent this broadcast and
|
||||
// cancel all watches, pass true for options.discardWatches.
|
||||
this.broadcastWatches();
|
||||
}
|
||||
return Promise.resolve();
|
||||
};
|
||||
InMemoryCache.prototype.removeOptimistic = function (idToRemove) {
|
||||
var newOptimisticData = this.optimisticData.removeLayer(idToRemove);
|
||||
if (newOptimisticData !== this.optimisticData) {
|
||||
this.optimisticData = newOptimisticData;
|
||||
this.broadcastWatches();
|
||||
}
|
||||
};
|
||||
InMemoryCache.prototype.batch = function (options) {
|
||||
var _this = this;
|
||||
var update = options.update, _a = options.optimistic, optimistic = _a === void 0 ? true : _a, removeOptimistic = options.removeOptimistic, onWatchUpdated = options.onWatchUpdated;
|
||||
var updateResult;
|
||||
var perform = function (layer) {
|
||||
var _a = _this, data = _a.data, optimisticData = _a.optimisticData;
|
||||
++_this.txCount;
|
||||
if (layer) {
|
||||
_this.data = _this.optimisticData = layer;
|
||||
}
|
||||
try {
|
||||
return (updateResult = update(_this));
|
||||
}
|
||||
finally {
|
||||
--_this.txCount;
|
||||
_this.data = data;
|
||||
_this.optimisticData = optimisticData;
|
||||
}
|
||||
};
|
||||
var alreadyDirty = new Set();
|
||||
if (onWatchUpdated && !this.txCount) {
|
||||
// If an options.onWatchUpdated callback is provided, we want to call it
|
||||
// with only the Cache.WatchOptions objects affected by options.update,
|
||||
// but there might be dirty watchers already waiting to be broadcast that
|
||||
// have nothing to do with the update. To prevent including those watchers
|
||||
// in the post-update broadcast, we perform this initial broadcast to
|
||||
// collect the dirty watchers, so we can re-dirty them later, after the
|
||||
// post-update broadcast, allowing them to receive their pending
|
||||
// broadcasts the next time broadcastWatches is called, just as they would
|
||||
// if we never called cache.batch.
|
||||
this.broadcastWatches(__assign(__assign({}, options), { onWatchUpdated: function (watch) {
|
||||
alreadyDirty.add(watch);
|
||||
return false;
|
||||
} }));
|
||||
}
|
||||
if (typeof optimistic === "string") {
|
||||
// Note that there can be multiple layers with the same optimistic ID.
|
||||
// When removeOptimistic(id) is called for that id, all matching layers
|
||||
// will be removed, and the remaining layers will be reapplied.
|
||||
this.optimisticData = this.optimisticData.addLayer(optimistic, perform);
|
||||
}
|
||||
else if (optimistic === false) {
|
||||
// Ensure both this.data and this.optimisticData refer to the root
|
||||
// (non-optimistic) layer of the cache during the update. Note that
|
||||
// this.data could be a Layer if we are currently executing an optimistic
|
||||
// update function, but otherwise will always be an EntityStore.Root
|
||||
// instance.
|
||||
perform(this.data);
|
||||
}
|
||||
else {
|
||||
// Otherwise, leave this.data and this.optimisticData unchanged and run
|
||||
// the update with broadcast batching.
|
||||
perform();
|
||||
}
|
||||
if (typeof removeOptimistic === "string") {
|
||||
this.optimisticData = this.optimisticData.removeLayer(removeOptimistic);
|
||||
}
|
||||
// Note: if this.txCount > 0, then alreadyDirty.size === 0, so this code
|
||||
// takes the else branch and calls this.broadcastWatches(options), which
|
||||
// does nothing when this.txCount > 0.
|
||||
if (onWatchUpdated && alreadyDirty.size) {
|
||||
this.broadcastWatches(__assign(__assign({}, options), { onWatchUpdated: function (watch, diff) {
|
||||
var result = onWatchUpdated.call(this, watch, diff);
|
||||
if (result !== false) {
|
||||
// Since onWatchUpdated did not return false, this diff is
|
||||
// about to be broadcast to watch.callback, so we don't need
|
||||
// to re-dirty it with the other alreadyDirty watches below.
|
||||
alreadyDirty.delete(watch);
|
||||
}
|
||||
return result;
|
||||
} }));
|
||||
// Silently re-dirty any watches that were already dirty before the update
|
||||
// was performed, and were not broadcast just now.
|
||||
if (alreadyDirty.size) {
|
||||
alreadyDirty.forEach(function (watch) { return _this.maybeBroadcastWatch.dirty(watch); });
|
||||
}
|
||||
}
|
||||
else {
|
||||
// If alreadyDirty is empty or we don't have an onWatchUpdated
|
||||
// function, we don't need to go to the trouble of wrapping
|
||||
// options.onWatchUpdated.
|
||||
this.broadcastWatches(options);
|
||||
}
|
||||
return updateResult;
|
||||
};
|
||||
InMemoryCache.prototype.performTransaction = function (update, optimisticId) {
|
||||
return this.batch({
|
||||
update: update,
|
||||
optimistic: optimisticId || optimisticId !== null,
|
||||
});
|
||||
};
|
||||
InMemoryCache.prototype.transformDocument = function (document) {
|
||||
return this.addTypenameToDocument(this.addFragmentsToDocument(document));
|
||||
};
|
||||
InMemoryCache.prototype.broadcastWatches = function (options) {
|
||||
var _this = this;
|
||||
if (!this.txCount) {
|
||||
this.watches.forEach(function (c) { return _this.maybeBroadcastWatch(c, options); });
|
||||
}
|
||||
};
|
||||
InMemoryCache.prototype.addFragmentsToDocument = function (document) {
|
||||
var fragments = this.config.fragments;
|
||||
return fragments ? fragments.transform(document) : document;
|
||||
};
|
||||
InMemoryCache.prototype.addTypenameToDocument = function (document) {
|
||||
if (this.addTypename) {
|
||||
return this.addTypenameTransform.transformDocument(document);
|
||||
}
|
||||
return document;
|
||||
};
|
||||
// This method is wrapped by maybeBroadcastWatch, which is called by
|
||||
// broadcastWatches, so that we compute and broadcast results only when
|
||||
// the data that would be broadcast might have changed. It would be
|
||||
// simpler to check for changes after recomputing a result but before
|
||||
// broadcasting it, but this wrapping approach allows us to skip both
|
||||
// the recomputation and the broadcast, in most cases.
|
||||
InMemoryCache.prototype.broadcastWatch = function (c, options) {
|
||||
var lastDiff = c.lastDiff;
|
||||
// Both WatchOptions and DiffOptions extend ReadOptions, and DiffOptions
|
||||
// currently requires no additional properties, so we can use c (a
|
||||
// WatchOptions object) as DiffOptions, without having to allocate a new
|
||||
// object, and without having to enumerate the relevant properties (query,
|
||||
// variables, etc.) explicitly. There will be some additional properties
|
||||
// (lastDiff, callback, etc.), but cache.diff ignores them.
|
||||
var diff = this.diff(c);
|
||||
if (options) {
|
||||
if (c.optimistic && typeof options.optimistic === "string") {
|
||||
diff.fromOptimisticTransaction = true;
|
||||
}
|
||||
if (options.onWatchUpdated &&
|
||||
options.onWatchUpdated.call(this, c, diff, lastDiff) === false) {
|
||||
// Returning false from the onWatchUpdated callback will prevent
|
||||
// calling c.callback(diff) for this watcher.
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (!lastDiff || !equal(lastDiff.result, diff.result)) {
|
||||
c.callback((c.lastDiff = diff), lastDiff);
|
||||
}
|
||||
};
|
||||
return InMemoryCache;
|
||||
}(ApolloCache));
|
||||
export { InMemoryCache };
|
||||
if (globalThis.__DEV__ !== false) {
|
||||
InMemoryCache.prototype.getMemoryInternals = getInMemoryCacheMemoryInternals;
|
||||
}
|
||||
//# sourceMappingURL=inMemoryCache.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
9
graphql-subscription/node_modules/@apollo/client/cache/inmemory/key-extractor.d.ts
generated
vendored
Normal file
9
graphql-subscription/node_modules/@apollo/client/cache/inmemory/key-extractor.d.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
import type { KeySpecifier, KeyFieldsFunction, KeyArgsFunction } from "./policies.js";
|
||||
export declare function keyFieldsFnFromSpecifier(specifier: KeySpecifier): KeyFieldsFunction;
|
||||
export declare function keyArgsFnFromSpecifier(specifier: KeySpecifier): KeyArgsFunction;
|
||||
export declare function collectSpecifierPaths(specifier: KeySpecifier, extractor: (path: string[]) => any): Record<string, any>;
|
||||
export declare function getSpecifierPaths(spec: KeySpecifier): string[][];
|
||||
declare function extractKey<TObj extends Record<string, any>, TKey extends string>(object: TObj, key: TKey): TObj[TKey] | undefined;
|
||||
export declare function extractKeyPath(object: Record<string, any>, path: string[], extract?: typeof extractKey): any;
|
||||
export {};
|
||||
//# sourceMappingURL=key-extractor.d.ts.map
|
||||
192
graphql-subscription/node_modules/@apollo/client/cache/inmemory/key-extractor.js
generated
vendored
Normal file
192
graphql-subscription/node_modules/@apollo/client/cache/inmemory/key-extractor.js
generated
vendored
Normal file
@@ -0,0 +1,192 @@
|
||||
import { invariant } from "../../utilities/globals/index.js";
|
||||
import { argumentsObjectFromField, DeepMerger, isNonEmptyArray, isNonNullObject, } from "../../utilities/index.js";
|
||||
import { hasOwn, isArray } from "./helpers.js";
|
||||
// Mapping from JSON-encoded KeySpecifier strings to associated information.
|
||||
var specifierInfoCache = Object.create(null);
|
||||
function lookupSpecifierInfo(spec) {
|
||||
// It's safe to encode KeySpecifier arrays with JSON.stringify, since they're
|
||||
// just arrays of strings or nested KeySpecifier arrays, and the order of the
|
||||
// array elements is important (and suitably preserved by JSON.stringify).
|
||||
var cacheKey = JSON.stringify(spec);
|
||||
return (specifierInfoCache[cacheKey] ||
|
||||
(specifierInfoCache[cacheKey] = Object.create(null)));
|
||||
}
|
||||
export function keyFieldsFnFromSpecifier(specifier) {
|
||||
var info = lookupSpecifierInfo(specifier);
|
||||
return (info.keyFieldsFn || (info.keyFieldsFn = function (object, context) {
|
||||
var extract = function (from, key) {
|
||||
return context.readField(key, from);
|
||||
};
|
||||
var keyObject = (context.keyObject = collectSpecifierPaths(specifier, function (schemaKeyPath) {
|
||||
var extracted = extractKeyPath(context.storeObject, schemaKeyPath,
|
||||
// Using context.readField to extract paths from context.storeObject
|
||||
// allows the extraction to see through Reference objects and respect
|
||||
// custom read functions.
|
||||
extract);
|
||||
if (extracted === void 0 &&
|
||||
object !== context.storeObject &&
|
||||
hasOwn.call(object, schemaKeyPath[0])) {
|
||||
// If context.storeObject fails to provide a value for the requested
|
||||
// path, fall back to the raw result object, if it has a top-level key
|
||||
// matching the first key in the path (schemaKeyPath[0]). This allows
|
||||
// key fields included in the written data to be saved in the cache
|
||||
// even if they are not selected explicitly in context.selectionSet.
|
||||
// Not being mentioned by context.selectionSet is convenient here,
|
||||
// since it means these extra fields cannot be affected by field
|
||||
// aliasing, which is why we can use extractKey instead of
|
||||
// context.readField for this extraction.
|
||||
extracted = extractKeyPath(object, schemaKeyPath, extractKey);
|
||||
}
|
||||
invariant(extracted !== void 0, 4, schemaKeyPath.join("."), object);
|
||||
return extracted;
|
||||
}));
|
||||
return "".concat(context.typename, ":").concat(JSON.stringify(keyObject));
|
||||
}));
|
||||
}
|
||||
// The keyArgs extraction process is roughly analogous to keyFields extraction,
|
||||
// but there are no aliases involved, missing fields are tolerated (by merely
|
||||
// omitting them from the key), and drawing from field.directives or variables
|
||||
// is allowed (in addition to drawing from the field's arguments object).
|
||||
// Concretely, these differences mean passing a different key path extractor
|
||||
// function to collectSpecifierPaths, reusing the shared extractKeyPath helper
|
||||
// wherever possible.
|
||||
export function keyArgsFnFromSpecifier(specifier) {
|
||||
var info = lookupSpecifierInfo(specifier);
|
||||
return (info.keyArgsFn ||
|
||||
(info.keyArgsFn = function (args, _a) {
|
||||
var field = _a.field, variables = _a.variables, fieldName = _a.fieldName;
|
||||
var collected = collectSpecifierPaths(specifier, function (keyPath) {
|
||||
var firstKey = keyPath[0];
|
||||
var firstChar = firstKey.charAt(0);
|
||||
if (firstChar === "@") {
|
||||
if (field && isNonEmptyArray(field.directives)) {
|
||||
var directiveName_1 = firstKey.slice(1);
|
||||
// If the directive appears multiple times, only the first
|
||||
// occurrence's arguments will be used. TODO Allow repetition?
|
||||
// TODO Cache this work somehow, a la aliasMap?
|
||||
var d = field.directives.find(function (d) { return d.name.value === directiveName_1; });
|
||||
// Fortunately argumentsObjectFromField works for DirectiveNode!
|
||||
var directiveArgs = d && argumentsObjectFromField(d, variables);
|
||||
// For directives without arguments (d defined, but directiveArgs ===
|
||||
// null), the presence or absence of the directive still counts as
|
||||
// part of the field key, so we return null in those cases. If no
|
||||
// directive with this name was found for this field (d undefined and
|
||||
// thus directiveArgs undefined), we return undefined, which causes
|
||||
// this value to be omitted from the key object returned by
|
||||
// collectSpecifierPaths.
|
||||
return (directiveArgs &&
|
||||
extractKeyPath(directiveArgs,
|
||||
// If keyPath.length === 1, this code calls extractKeyPath with an
|
||||
// empty path, which works because it uses directiveArgs as the
|
||||
// extracted value.
|
||||
keyPath.slice(1)));
|
||||
}
|
||||
// If the key started with @ but there was no corresponding directive,
|
||||
// we want to omit this value from the key object, not fall through to
|
||||
// treating @whatever as a normal argument name.
|
||||
return;
|
||||
}
|
||||
if (firstChar === "$") {
|
||||
var variableName = firstKey.slice(1);
|
||||
if (variables && hasOwn.call(variables, variableName)) {
|
||||
var varKeyPath = keyPath.slice(0);
|
||||
varKeyPath[0] = variableName;
|
||||
return extractKeyPath(variables, varKeyPath);
|
||||
}
|
||||
// If the key started with $ but there was no corresponding variable, we
|
||||
// want to omit this value from the key object, not fall through to
|
||||
// treating $whatever as a normal argument name.
|
||||
return;
|
||||
}
|
||||
if (args) {
|
||||
return extractKeyPath(args, keyPath);
|
||||
}
|
||||
});
|
||||
var suffix = JSON.stringify(collected);
|
||||
// If no arguments were passed to this field, and it didn't have any other
|
||||
// field key contributions from directives or variables, hide the empty
|
||||
// :{} suffix from the field key. However, a field passed no arguments can
|
||||
// still end up with a non-empty :{...} suffix if its key configuration
|
||||
// refers to directives or variables.
|
||||
if (args || suffix !== "{}") {
|
||||
fieldName += ":" + suffix;
|
||||
}
|
||||
return fieldName;
|
||||
}));
|
||||
}
|
||||
export function collectSpecifierPaths(specifier, extractor) {
|
||||
// For each path specified by specifier, invoke the extractor, and repeatedly
|
||||
// merge the results together, with appropriate ancestor context.
|
||||
var merger = new DeepMerger();
|
||||
return getSpecifierPaths(specifier).reduce(function (collected, path) {
|
||||
var _a;
|
||||
var toMerge = extractor(path);
|
||||
if (toMerge !== void 0) {
|
||||
// This path is not expected to contain array indexes, so the toMerge
|
||||
// reconstruction will not contain arrays. TODO Fix this?
|
||||
for (var i = path.length - 1; i >= 0; --i) {
|
||||
toMerge = (_a = {}, _a[path[i]] = toMerge, _a);
|
||||
}
|
||||
collected = merger.merge(collected, toMerge);
|
||||
}
|
||||
return collected;
|
||||
}, Object.create(null));
|
||||
}
|
||||
export function getSpecifierPaths(spec) {
|
||||
var info = lookupSpecifierInfo(spec);
|
||||
if (!info.paths) {
|
||||
var paths_1 = (info.paths = []);
|
||||
var currentPath_1 = [];
|
||||
spec.forEach(function (s, i) {
|
||||
if (isArray(s)) {
|
||||
getSpecifierPaths(s).forEach(function (p) { return paths_1.push(currentPath_1.concat(p)); });
|
||||
currentPath_1.length = 0;
|
||||
}
|
||||
else {
|
||||
currentPath_1.push(s);
|
||||
if (!isArray(spec[i + 1])) {
|
||||
paths_1.push(currentPath_1.slice(0));
|
||||
currentPath_1.length = 0;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
return info.paths;
|
||||
}
|
||||
function extractKey(object, key) {
|
||||
return object[key];
|
||||
}
|
||||
export function extractKeyPath(object, path, extract) {
|
||||
// For each key in path, extract the corresponding child property from obj,
|
||||
// flattening arrays if encountered (uncommon for keyFields and keyArgs, but
|
||||
// possible). The final result of path.reduce is normalized so unexpected leaf
|
||||
// objects have their keys safely sorted. That final result is difficult to
|
||||
// type as anything other than any. You're welcome to try to improve the
|
||||
// return type, but keep in mind extractKeyPath is not a public function
|
||||
// (exported only for testing), so the effort may not be worthwhile unless the
|
||||
// limited set of actual callers (see above) pass arguments that TypeScript
|
||||
// can statically type. If we know only that path is some array of strings
|
||||
// (and not, say, a specific tuple of statically known strings), any (or
|
||||
// possibly unknown) is the honest answer.
|
||||
extract = extract || extractKey;
|
||||
return normalize(path.reduce(function reducer(obj, key) {
|
||||
return isArray(obj) ?
|
||||
obj.map(function (child) { return reducer(child, key); })
|
||||
: obj && extract(obj, key);
|
||||
}, object));
|
||||
}
|
||||
function normalize(value) {
|
||||
// Usually the extracted value will be a scalar value, since most primary
|
||||
// key fields are scalar, but just in case we get an object or an array, we
|
||||
// need to do some normalization of the order of (nested) keys.
|
||||
if (isNonNullObject(value)) {
|
||||
if (isArray(value)) {
|
||||
return value.map(normalize);
|
||||
}
|
||||
return collectSpecifierPaths(Object.keys(value).sort(), function (path) {
|
||||
return extractKeyPath(value, path);
|
||||
});
|
||||
}
|
||||
return value;
|
||||
}
|
||||
//# sourceMappingURL=key-extractor.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/key-extractor.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/key-extractor.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
12
graphql-subscription/node_modules/@apollo/client/cache/inmemory/object-canon.d.ts
generated
vendored
Normal file
12
graphql-subscription/node_modules/@apollo/client/cache/inmemory/object-canon.d.ts
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
export declare class ObjectCanon {
|
||||
private known;
|
||||
private pool;
|
||||
isKnown(value: any): boolean;
|
||||
private passes;
|
||||
pass<T>(value: T): T;
|
||||
admit<T>(value: T): T;
|
||||
private sortedKeys;
|
||||
private keysByJSON;
|
||||
readonly empty: {};
|
||||
}
|
||||
//# sourceMappingURL=object-canon.d.ts.map
|
||||
181
graphql-subscription/node_modules/@apollo/client/cache/inmemory/object-canon.js
generated
vendored
Normal file
181
graphql-subscription/node_modules/@apollo/client/cache/inmemory/object-canon.js
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
import { __assign } from "tslib";
|
||||
import { Trie } from "@wry/trie";
|
||||
import { canUseWeakMap, canUseWeakSet, isNonNullObject as isObjectOrArray, } from "../../utilities/index.js";
|
||||
import { isArray } from "./helpers.js";
|
||||
function shallowCopy(value) {
|
||||
if (isObjectOrArray(value)) {
|
||||
return isArray(value) ?
|
||||
value.slice(0)
|
||||
: __assign({ __proto__: Object.getPrototypeOf(value) }, value);
|
||||
}
|
||||
return value;
|
||||
}
|
||||
// When programmers talk about the "canonical form" of an object, they
|
||||
// usually have the following meaning in mind, which I've copied from
|
||||
// https://en.wiktionary.org/wiki/canonical_form:
|
||||
//
|
||||
// 1. A standard or normal presentation of a mathematical entity [or
|
||||
// object]. A canonical form is an element of a set of representatives
|
||||
// of equivalence classes of forms such that there is a function or
|
||||
// procedure which projects every element of each equivalence class
|
||||
// onto that one element, the canonical form of that equivalence
|
||||
// class. The canonical form is expected to be simpler than the rest of
|
||||
// the forms in some way.
|
||||
//
|
||||
// That's a long-winded way of saying any two objects that have the same
|
||||
// canonical form may be considered equivalent, even if they are !==,
|
||||
// which usually means the objects are structurally equivalent (deeply
|
||||
// equal), but don't necessarily use the same memory.
|
||||
//
|
||||
// Like a literary or musical canon, this ObjectCanon class represents a
|
||||
// collection of unique canonical items (JavaScript objects), with the
|
||||
// important property that canon.admit(a) === canon.admit(b) if a and b
|
||||
// are deeply equal to each other. In terms of the definition above, the
|
||||
// canon.admit method is the "function or procedure which projects every"
|
||||
// object "onto that one element, the canonical form."
|
||||
//
|
||||
// In the worst case, the canonicalization process may involve looking at
|
||||
// every property in the provided object tree, so it takes the same order
|
||||
// of time as deep equality checking. Fortunately, already-canonicalized
|
||||
// objects are returned immediately from canon.admit, so the presence of
|
||||
// canonical subtrees tends to speed up canonicalization.
|
||||
//
|
||||
// Since consumers of canonical objects can check for deep equality in
|
||||
// constant time, canonicalizing cache results can massively improve the
|
||||
// performance of application code that skips re-rendering unchanged
|
||||
// results, such as "pure" UI components in a framework like React.
|
||||
//
|
||||
// Of course, since canonical objects may be shared widely between
|
||||
// unrelated consumers, it's important to think of them as immutable, even
|
||||
// though they are not actually frozen with Object.freeze in production,
|
||||
// due to the extra performance overhead that comes with frozen objects.
|
||||
//
|
||||
// Custom scalar objects whose internal class name is neither Array nor
|
||||
// Object can be included safely in the admitted tree, but they will not
|
||||
// be replaced with a canonical version (to put it another way, they are
|
||||
// assumed to be canonical already).
|
||||
//
|
||||
// If we ignore custom objects, no detection of cycles or repeated object
|
||||
// references is currently required by the StoreReader class, since
|
||||
// GraphQL result objects are JSON-serializable trees (and thus contain
|
||||
// neither cycles nor repeated subtrees), so we can avoid the complexity
|
||||
// of keeping track of objects we've already seen during the recursion of
|
||||
// the admit method.
|
||||
//
|
||||
// In the future, we may consider adding additional cases to the switch
|
||||
// statement to handle other common object types, such as "[object Date]"
|
||||
// objects, as needed.
|
||||
var ObjectCanon = /** @class */ (function () {
|
||||
function ObjectCanon() {
|
||||
// Set of all canonical objects this ObjectCanon has admitted, allowing
|
||||
// canon.admit to return previously-canonicalized objects immediately.
|
||||
this.known = new (canUseWeakSet ? WeakSet : Set)();
|
||||
// Efficient storage/lookup structure for canonical objects.
|
||||
this.pool = new Trie(canUseWeakMap);
|
||||
// Make the ObjectCanon assume this value has already been
|
||||
// canonicalized.
|
||||
this.passes = new WeakMap();
|
||||
// Arrays that contain the same elements in a different order can share
|
||||
// the same SortedKeysInfo object, to save memory.
|
||||
this.keysByJSON = new Map();
|
||||
// This has to come last because it depends on keysByJSON.
|
||||
this.empty = this.admit({});
|
||||
}
|
||||
ObjectCanon.prototype.isKnown = function (value) {
|
||||
return isObjectOrArray(value) && this.known.has(value);
|
||||
};
|
||||
ObjectCanon.prototype.pass = function (value) {
|
||||
if (isObjectOrArray(value)) {
|
||||
var copy = shallowCopy(value);
|
||||
this.passes.set(copy, value);
|
||||
return copy;
|
||||
}
|
||||
return value;
|
||||
};
|
||||
ObjectCanon.prototype.admit = function (value) {
|
||||
var _this = this;
|
||||
if (isObjectOrArray(value)) {
|
||||
var original = this.passes.get(value);
|
||||
if (original)
|
||||
return original;
|
||||
var proto = Object.getPrototypeOf(value);
|
||||
switch (proto) {
|
||||
case Array.prototype: {
|
||||
if (this.known.has(value))
|
||||
return value;
|
||||
var array = value.map(this.admit, this);
|
||||
// Arrays are looked up in the Trie using their recursively
|
||||
// canonicalized elements, and the known version of the array is
|
||||
// preserved as node.array.
|
||||
var node = this.pool.lookupArray(array);
|
||||
if (!node.array) {
|
||||
this.known.add((node.array = array));
|
||||
// Since canonical arrays may be shared widely between
|
||||
// unrelated consumers, it's important to regard them as
|
||||
// immutable, even if they are not frozen in production.
|
||||
if (globalThis.__DEV__ !== false) {
|
||||
Object.freeze(array);
|
||||
}
|
||||
}
|
||||
return node.array;
|
||||
}
|
||||
case null:
|
||||
case Object.prototype: {
|
||||
if (this.known.has(value))
|
||||
return value;
|
||||
var proto_1 = Object.getPrototypeOf(value);
|
||||
var array_1 = [proto_1];
|
||||
var keys = this.sortedKeys(value);
|
||||
array_1.push(keys.json);
|
||||
var firstValueIndex_1 = array_1.length;
|
||||
keys.sorted.forEach(function (key) {
|
||||
array_1.push(_this.admit(value[key]));
|
||||
});
|
||||
// Objects are looked up in the Trie by their prototype (which
|
||||
// is *not* recursively canonicalized), followed by a JSON
|
||||
// representation of their (sorted) keys, followed by the
|
||||
// sequence of recursively canonicalized values corresponding to
|
||||
// those keys. To keep the final results unambiguous with other
|
||||
// sequences (such as arrays that just happen to contain [proto,
|
||||
// keys.json, value1, value2, ...]), the known version of the
|
||||
// object is stored as node.object.
|
||||
var node = this.pool.lookupArray(array_1);
|
||||
if (!node.object) {
|
||||
var obj_1 = (node.object = Object.create(proto_1));
|
||||
this.known.add(obj_1);
|
||||
keys.sorted.forEach(function (key, i) {
|
||||
obj_1[key] = array_1[firstValueIndex_1 + i];
|
||||
});
|
||||
// Since canonical objects may be shared widely between
|
||||
// unrelated consumers, it's important to regard them as
|
||||
// immutable, even if they are not frozen in production.
|
||||
if (globalThis.__DEV__ !== false) {
|
||||
Object.freeze(obj_1);
|
||||
}
|
||||
}
|
||||
return node.object;
|
||||
}
|
||||
}
|
||||
}
|
||||
return value;
|
||||
};
|
||||
// It's worthwhile to cache the sorting of arrays of strings, since the
|
||||
// same initial unsorted arrays tend to be encountered many times.
|
||||
// Fortunately, we can reuse the Trie machinery to look up the sorted
|
||||
// arrays in linear time (which is faster than sorting large arrays).
|
||||
ObjectCanon.prototype.sortedKeys = function (obj) {
|
||||
var keys = Object.keys(obj);
|
||||
var node = this.pool.lookupArray(keys);
|
||||
if (!node.keys) {
|
||||
keys.sort();
|
||||
var json = JSON.stringify(keys);
|
||||
if (!(node.keys = this.keysByJSON.get(json))) {
|
||||
this.keysByJSON.set(json, (node.keys = { sorted: keys, json: json }));
|
||||
}
|
||||
}
|
||||
return node.keys;
|
||||
};
|
||||
return ObjectCanon;
|
||||
}());
|
||||
export { ObjectCanon };
|
||||
//# sourceMappingURL=object-canon.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/object-canon.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/object-canon.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
97
graphql-subscription/node_modules/@apollo/client/cache/inmemory/policies.d.ts
generated
vendored
Normal file
97
graphql-subscription/node_modules/@apollo/client/cache/inmemory/policies.d.ts
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
import type { InlineFragmentNode, FragmentDefinitionNode, SelectionSetNode, FieldNode } from "graphql";
|
||||
import type { FragmentMap, StoreValue, StoreObject, Reference } from "../../utilities/index.js";
|
||||
import { isReference } from "../../utilities/index.js";
|
||||
import type { IdGetter, MergeInfo, ReadMergeModifyContext } from "./types.js";
|
||||
import type { InMemoryCache } from "./inMemoryCache.js";
|
||||
import type { SafeReadonly, FieldSpecifier, ToReferenceFunction, ReadFieldFunction, ReadFieldOptions, CanReadFunction } from "../core/types/common.js";
|
||||
import type { WriteContext } from "./writeToStore.js";
|
||||
export type TypePolicies = {
|
||||
[__typename: string]: TypePolicy;
|
||||
};
|
||||
export type KeySpecifier = ReadonlyArray<string | KeySpecifier>;
|
||||
export type KeyFieldsContext = {
|
||||
typename: string | undefined;
|
||||
storeObject: StoreObject;
|
||||
readField: ReadFieldFunction;
|
||||
selectionSet?: SelectionSetNode;
|
||||
fragmentMap?: FragmentMap;
|
||||
keyObject?: Record<string, any>;
|
||||
};
|
||||
export type KeyFieldsFunction = (object: Readonly<StoreObject>, context: KeyFieldsContext) => KeySpecifier | false | ReturnType<IdGetter>;
|
||||
export type TypePolicy = {
|
||||
keyFields?: KeySpecifier | KeyFieldsFunction | false;
|
||||
merge?: FieldMergeFunction | boolean;
|
||||
queryType?: true;
|
||||
mutationType?: true;
|
||||
subscriptionType?: true;
|
||||
fields?: {
|
||||
[fieldName: string]: FieldPolicy<any> | FieldReadFunction<any>;
|
||||
};
|
||||
};
|
||||
export type KeyArgsFunction = (args: Record<string, any> | null, context: {
|
||||
typename: string;
|
||||
fieldName: string;
|
||||
field: FieldNode | null;
|
||||
variables?: Record<string, any>;
|
||||
}) => KeySpecifier | false | ReturnType<IdGetter>;
|
||||
export type FieldPolicy<TExisting = any, TIncoming = TExisting, TReadResult = TIncoming, TOptions extends FieldFunctionOptions = FieldFunctionOptions> = {
|
||||
keyArgs?: KeySpecifier | KeyArgsFunction | false;
|
||||
read?: FieldReadFunction<TExisting, TReadResult, TOptions>;
|
||||
merge?: FieldMergeFunction<TExisting, TIncoming, TOptions> | boolean;
|
||||
};
|
||||
export type StorageType = Record<string, any>;
|
||||
export interface FieldFunctionOptions<TArgs = Record<string, any>, TVars = Record<string, any>> {
|
||||
args: TArgs | null;
|
||||
fieldName: string;
|
||||
storeFieldName: string;
|
||||
field: FieldNode | null;
|
||||
variables?: TVars;
|
||||
isReference: typeof isReference;
|
||||
toReference: ToReferenceFunction;
|
||||
storage: StorageType;
|
||||
cache: InMemoryCache;
|
||||
readField: ReadFieldFunction;
|
||||
canRead: CanReadFunction;
|
||||
mergeObjects: MergeObjectsFunction;
|
||||
}
|
||||
type MergeObjectsFunction = <T extends StoreObject | Reference>(existing: T, incoming: T) => T;
|
||||
export type FieldReadFunction<TExisting = any, TReadResult = TExisting, TOptions extends FieldFunctionOptions = FieldFunctionOptions> = (existing: SafeReadonly<TExisting> | undefined, options: TOptions) => TReadResult | undefined;
|
||||
export type FieldMergeFunction<TExisting = any, TIncoming = TExisting, TOptions extends FieldFunctionOptions = FieldFunctionOptions> = (existing: SafeReadonly<TExisting> | undefined, incoming: SafeReadonly<TIncoming>, options: TOptions) => SafeReadonly<TExisting>;
|
||||
export type PossibleTypesMap = {
|
||||
[supertype: string]: string[];
|
||||
};
|
||||
export declare class Policies {
|
||||
private config;
|
||||
private typePolicies;
|
||||
private toBeAdded;
|
||||
private supertypeMap;
|
||||
private fuzzySubtypes;
|
||||
readonly cache: InMemoryCache;
|
||||
readonly rootIdsByTypename: Record<string, string>;
|
||||
readonly rootTypenamesById: Record<string, string>;
|
||||
readonly usingPossibleTypes = false;
|
||||
constructor(config: {
|
||||
cache: InMemoryCache;
|
||||
dataIdFromObject?: KeyFieldsFunction;
|
||||
possibleTypes?: PossibleTypesMap;
|
||||
typePolicies?: TypePolicies;
|
||||
});
|
||||
identify(object: StoreObject, partialContext?: Partial<KeyFieldsContext>): [string?, StoreObject?];
|
||||
addTypePolicies(typePolicies: TypePolicies): void;
|
||||
private updateTypePolicy;
|
||||
private setRootTypename;
|
||||
addPossibleTypes(possibleTypes: PossibleTypesMap): void;
|
||||
private getTypePolicy;
|
||||
private getFieldPolicy;
|
||||
private getSupertypeSet;
|
||||
fragmentMatches(fragment: InlineFragmentNode | FragmentDefinitionNode, typename: string | undefined, result?: Record<string, any>, variables?: Record<string, any>): boolean;
|
||||
hasKeyArgs(typename: string | undefined, fieldName: string): boolean;
|
||||
getStoreFieldName(fieldSpec: FieldSpecifier): string;
|
||||
readField<V = StoreValue>(options: ReadFieldOptions, context: ReadMergeModifyContext): SafeReadonly<V> | undefined;
|
||||
getReadFunction(typename: string | undefined, fieldName: string): FieldReadFunction | undefined;
|
||||
getMergeFunction(parentTypename: string | undefined, fieldName: string, childTypename: string | undefined): FieldMergeFunction | undefined;
|
||||
runMergeFunction(existing: StoreValue, incoming: StoreValue, { field, typename, merge }: MergeInfo, context: WriteContext, storage?: StorageType): any;
|
||||
}
|
||||
export declare function normalizeReadFieldOptions(readFieldArgs: IArguments, objectOrReference: StoreObject | Reference | undefined, variables?: ReadMergeModifyContext["variables"]): ReadFieldOptions;
|
||||
export {};
|
||||
//# sourceMappingURL=policies.d.ts.map
|
||||
601
graphql-subscription/node_modules/@apollo/client/cache/inmemory/policies.js
generated
vendored
Normal file
601
graphql-subscription/node_modules/@apollo/client/cache/inmemory/policies.js
generated
vendored
Normal file
@@ -0,0 +1,601 @@
|
||||
import { __assign, __rest } from "tslib";
|
||||
import { invariant, newInvariantError } from "../../utilities/globals/index.js";
|
||||
import { storeKeyNameFromField, argumentsObjectFromField, isReference, getStoreKeyName, isNonNullObject, stringifyForDisplay, } from "../../utilities/index.js";
|
||||
import { hasOwn, fieldNameFromStoreName, storeValueIsStoreObject, selectionSetMatchesResult, TypeOrFieldNameRegExp, defaultDataIdFromObject, isArray, } from "./helpers.js";
|
||||
import { cacheSlot } from "./reactiveVars.js";
|
||||
import { keyArgsFnFromSpecifier, keyFieldsFnFromSpecifier, } from "./key-extractor.js";
|
||||
function argsFromFieldSpecifier(spec) {
|
||||
return (spec.args !== void 0 ? spec.args
|
||||
: spec.field ? argumentsObjectFromField(spec.field, spec.variables)
|
||||
: null);
|
||||
}
|
||||
var nullKeyFieldsFn = function () { return void 0; };
|
||||
var simpleKeyArgsFn = function (_args, context) { return context.fieldName; };
|
||||
// These merge functions can be selected by specifying merge:true or
|
||||
// merge:false in a field policy.
|
||||
var mergeTrueFn = function (existing, incoming, _a) {
|
||||
var mergeObjects = _a.mergeObjects;
|
||||
return mergeObjects(existing, incoming);
|
||||
};
|
||||
var mergeFalseFn = function (_, incoming) { return incoming; };
|
||||
var Policies = /** @class */ (function () {
|
||||
function Policies(config) {
|
||||
this.config = config;
|
||||
this.typePolicies = Object.create(null);
|
||||
this.toBeAdded = Object.create(null);
|
||||
// Map from subtype names to sets of supertype names. Note that this
|
||||
// representation inverts the structure of possibleTypes (whose keys are
|
||||
// supertypes and whose values are arrays of subtypes) because it tends
|
||||
// to be much more efficient to search upwards than downwards.
|
||||
this.supertypeMap = new Map();
|
||||
// Any fuzzy subtypes specified by possibleTypes will be converted to
|
||||
// RegExp objects and recorded here. Every key of this map can also be
|
||||
// found in supertypeMap. In many cases this Map will be empty, which
|
||||
// means no fuzzy subtype checking will happen in fragmentMatches.
|
||||
this.fuzzySubtypes = new Map();
|
||||
this.rootIdsByTypename = Object.create(null);
|
||||
this.rootTypenamesById = Object.create(null);
|
||||
this.usingPossibleTypes = false;
|
||||
this.config = __assign({ dataIdFromObject: defaultDataIdFromObject }, config);
|
||||
this.cache = this.config.cache;
|
||||
this.setRootTypename("Query");
|
||||
this.setRootTypename("Mutation");
|
||||
this.setRootTypename("Subscription");
|
||||
if (config.possibleTypes) {
|
||||
this.addPossibleTypes(config.possibleTypes);
|
||||
}
|
||||
if (config.typePolicies) {
|
||||
this.addTypePolicies(config.typePolicies);
|
||||
}
|
||||
}
|
||||
Policies.prototype.identify = function (object, partialContext) {
|
||||
var _a;
|
||||
var policies = this;
|
||||
var typename = (partialContext &&
|
||||
(partialContext.typename || ((_a = partialContext.storeObject) === null || _a === void 0 ? void 0 : _a.__typename))) ||
|
||||
object.__typename;
|
||||
// It should be possible to write root Query fields with writeFragment,
|
||||
// using { __typename: "Query", ... } as the data, but it does not make
|
||||
// sense to allow the same identification behavior for the Mutation and
|
||||
// Subscription types, since application code should never be writing
|
||||
// directly to (or reading directly from) those root objects.
|
||||
if (typename === this.rootTypenamesById.ROOT_QUERY) {
|
||||
return ["ROOT_QUERY"];
|
||||
}
|
||||
// Default context.storeObject to object if not otherwise provided.
|
||||
var storeObject = (partialContext && partialContext.storeObject) || object;
|
||||
var context = __assign(__assign({}, partialContext), { typename: typename, storeObject: storeObject, readField: (partialContext && partialContext.readField) ||
|
||||
function () {
|
||||
var options = normalizeReadFieldOptions(arguments, storeObject);
|
||||
return policies.readField(options, {
|
||||
store: policies.cache["data"],
|
||||
variables: options.variables,
|
||||
});
|
||||
} });
|
||||
var id;
|
||||
var policy = typename && this.getTypePolicy(typename);
|
||||
var keyFn = (policy && policy.keyFn) || this.config.dataIdFromObject;
|
||||
while (keyFn) {
|
||||
var specifierOrId = keyFn(__assign(__assign({}, object), storeObject), context);
|
||||
if (isArray(specifierOrId)) {
|
||||
keyFn = keyFieldsFnFromSpecifier(specifierOrId);
|
||||
}
|
||||
else {
|
||||
id = specifierOrId;
|
||||
break;
|
||||
}
|
||||
}
|
||||
id = id ? String(id) : void 0;
|
||||
return context.keyObject ? [id, context.keyObject] : [id];
|
||||
};
|
||||
Policies.prototype.addTypePolicies = function (typePolicies) {
|
||||
var _this = this;
|
||||
Object.keys(typePolicies).forEach(function (typename) {
|
||||
var _a = typePolicies[typename], queryType = _a.queryType, mutationType = _a.mutationType, subscriptionType = _a.subscriptionType, incoming = __rest(_a, ["queryType", "mutationType", "subscriptionType"]);
|
||||
// Though {query,mutation,subscription}Type configurations are rare,
|
||||
// it's important to call setRootTypename as early as possible,
|
||||
// since these configurations should apply consistently for the
|
||||
// entire lifetime of the cache. Also, since only one __typename can
|
||||
// qualify as one of these root types, these three properties cannot
|
||||
// be inherited, unlike the rest of the incoming properties. That
|
||||
// restriction is convenient, because the purpose of this.toBeAdded
|
||||
// is to delay the processing of type/field policies until the first
|
||||
// time they're used, allowing policies to be added in any order as
|
||||
// long as all relevant policies (including policies for supertypes)
|
||||
// have been added by the time a given policy is used for the first
|
||||
// time. In other words, since inheritance doesn't matter for these
|
||||
// properties, there's also no need to delay their processing using
|
||||
// the this.toBeAdded queue.
|
||||
if (queryType)
|
||||
_this.setRootTypename("Query", typename);
|
||||
if (mutationType)
|
||||
_this.setRootTypename("Mutation", typename);
|
||||
if (subscriptionType)
|
||||
_this.setRootTypename("Subscription", typename);
|
||||
if (hasOwn.call(_this.toBeAdded, typename)) {
|
||||
_this.toBeAdded[typename].push(incoming);
|
||||
}
|
||||
else {
|
||||
_this.toBeAdded[typename] = [incoming];
|
||||
}
|
||||
});
|
||||
};
|
||||
Policies.prototype.updateTypePolicy = function (typename, incoming) {
|
||||
var _this = this;
|
||||
var existing = this.getTypePolicy(typename);
|
||||
var keyFields = incoming.keyFields, fields = incoming.fields;
|
||||
function setMerge(existing, merge) {
|
||||
existing.merge =
|
||||
typeof merge === "function" ? merge
|
||||
// Pass merge:true as a shorthand for a merge implementation
|
||||
// that returns options.mergeObjects(existing, incoming).
|
||||
: merge === true ? mergeTrueFn
|
||||
// Pass merge:false to make incoming always replace existing
|
||||
// without any warnings about data clobbering.
|
||||
: merge === false ? mergeFalseFn
|
||||
: existing.merge;
|
||||
}
|
||||
// Type policies can define merge functions, as an alternative to
|
||||
// using field policies to merge child objects.
|
||||
setMerge(existing, incoming.merge);
|
||||
existing.keyFn =
|
||||
// Pass false to disable normalization for this typename.
|
||||
keyFields === false ? nullKeyFieldsFn
|
||||
// Pass an array of strings to use those fields to compute a
|
||||
// composite ID for objects of this typename.
|
||||
: isArray(keyFields) ? keyFieldsFnFromSpecifier(keyFields)
|
||||
// Pass a function to take full control over identification.
|
||||
: typeof keyFields === "function" ? keyFields
|
||||
// Leave existing.keyFn unchanged if above cases fail.
|
||||
: existing.keyFn;
|
||||
if (fields) {
|
||||
Object.keys(fields).forEach(function (fieldName) {
|
||||
var existing = _this.getFieldPolicy(typename, fieldName, true);
|
||||
var incoming = fields[fieldName];
|
||||
if (typeof incoming === "function") {
|
||||
existing.read = incoming;
|
||||
}
|
||||
else {
|
||||
var keyArgs = incoming.keyArgs, read = incoming.read, merge = incoming.merge;
|
||||
existing.keyFn =
|
||||
// Pass false to disable argument-based differentiation of
|
||||
// field identities.
|
||||
keyArgs === false ? simpleKeyArgsFn
|
||||
// Pass an array of strings to use named arguments to
|
||||
// compute a composite identity for the field.
|
||||
: isArray(keyArgs) ? keyArgsFnFromSpecifier(keyArgs)
|
||||
// Pass a function to take full control over field identity.
|
||||
: typeof keyArgs === "function" ? keyArgs
|
||||
// Leave existing.keyFn unchanged if above cases fail.
|
||||
: existing.keyFn;
|
||||
if (typeof read === "function") {
|
||||
existing.read = read;
|
||||
}
|
||||
setMerge(existing, merge);
|
||||
}
|
||||
if (existing.read && existing.merge) {
|
||||
// If we have both a read and a merge function, assume
|
||||
// keyArgs:false, because read and merge together can take
|
||||
// responsibility for interpreting arguments in and out. This
|
||||
// default assumption can always be overridden by specifying
|
||||
// keyArgs explicitly in the FieldPolicy.
|
||||
existing.keyFn = existing.keyFn || simpleKeyArgsFn;
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
Policies.prototype.setRootTypename = function (which, typename) {
|
||||
if (typename === void 0) { typename = which; }
|
||||
var rootId = "ROOT_" + which.toUpperCase();
|
||||
var old = this.rootTypenamesById[rootId];
|
||||
if (typename !== old) {
|
||||
invariant(!old || old === which, 5, which);
|
||||
// First, delete any old __typename associated with this rootId from
|
||||
// rootIdsByTypename.
|
||||
if (old)
|
||||
delete this.rootIdsByTypename[old];
|
||||
// Now make this the only __typename that maps to this rootId.
|
||||
this.rootIdsByTypename[typename] = rootId;
|
||||
// Finally, update the __typename associated with this rootId.
|
||||
this.rootTypenamesById[rootId] = typename;
|
||||
}
|
||||
};
|
||||
Policies.prototype.addPossibleTypes = function (possibleTypes) {
|
||||
var _this = this;
|
||||
this.usingPossibleTypes = true;
|
||||
Object.keys(possibleTypes).forEach(function (supertype) {
|
||||
// Make sure all types have an entry in this.supertypeMap, even if
|
||||
// their supertype set is empty, so we can return false immediately
|
||||
// from policies.fragmentMatches for unknown supertypes.
|
||||
_this.getSupertypeSet(supertype, true);
|
||||
possibleTypes[supertype].forEach(function (subtype) {
|
||||
_this.getSupertypeSet(subtype, true).add(supertype);
|
||||
var match = subtype.match(TypeOrFieldNameRegExp);
|
||||
if (!match || match[0] !== subtype) {
|
||||
// TODO Don't interpret just any invalid typename as a RegExp.
|
||||
_this.fuzzySubtypes.set(subtype, new RegExp(subtype));
|
||||
}
|
||||
});
|
||||
});
|
||||
};
|
||||
Policies.prototype.getTypePolicy = function (typename) {
|
||||
var _this = this;
|
||||
if (!hasOwn.call(this.typePolicies, typename)) {
|
||||
var policy_1 = (this.typePolicies[typename] = Object.create(null));
|
||||
policy_1.fields = Object.create(null);
|
||||
// When the TypePolicy for typename is first accessed, instead of
|
||||
// starting with an empty policy object, inherit any properties or
|
||||
// fields from the type policies of the supertypes of typename.
|
||||
//
|
||||
// Any properties or fields defined explicitly within the TypePolicy
|
||||
// for typename will take precedence, and if there are multiple
|
||||
// supertypes, the properties of policies whose types were added
|
||||
// later via addPossibleTypes will take precedence over those of
|
||||
// earlier supertypes. TODO Perhaps we should warn about these
|
||||
// conflicts in development, and recommend defining the property
|
||||
// explicitly in the subtype policy?
|
||||
//
|
||||
// Field policy inheritance is atomic/shallow: you can't inherit a
|
||||
// field policy and then override just its read function, since read
|
||||
// and merge functions often need to cooperate, so changing only one
|
||||
// of them would be a recipe for inconsistency.
|
||||
//
|
||||
// Once the TypePolicy for typename has been accessed, its properties can
|
||||
// still be updated directly using addTypePolicies, but future changes to
|
||||
// inherited supertype policies will not be reflected in this subtype
|
||||
// policy, because this code runs at most once per typename.
|
||||
var supertypes_1 = this.supertypeMap.get(typename);
|
||||
if (!supertypes_1 && this.fuzzySubtypes.size) {
|
||||
// To make the inheritance logic work for unknown typename strings that
|
||||
// may have fuzzy supertypes, we give this typename an empty supertype
|
||||
// set and then populate it with any fuzzy supertypes that match.
|
||||
supertypes_1 = this.getSupertypeSet(typename, true);
|
||||
// This only works for typenames that are directly matched by a fuzzy
|
||||
// supertype. What if there is an intermediate chain of supertypes?
|
||||
// While possible, that situation can only be solved effectively by
|
||||
// specifying the intermediate relationships via possibleTypes, manually
|
||||
// and in a non-fuzzy way.
|
||||
this.fuzzySubtypes.forEach(function (regExp, fuzzy) {
|
||||
if (regExp.test(typename)) {
|
||||
// The fuzzy parameter is just the original string version of regExp
|
||||
// (not a valid __typename string), but we can look up the
|
||||
// associated supertype(s) in this.supertypeMap.
|
||||
var fuzzySupertypes = _this.supertypeMap.get(fuzzy);
|
||||
if (fuzzySupertypes) {
|
||||
fuzzySupertypes.forEach(function (supertype) {
|
||||
return supertypes_1.add(supertype);
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
if (supertypes_1 && supertypes_1.size) {
|
||||
supertypes_1.forEach(function (supertype) {
|
||||
var _a = _this.getTypePolicy(supertype), fields = _a.fields, rest = __rest(_a, ["fields"]);
|
||||
Object.assign(policy_1, rest);
|
||||
Object.assign(policy_1.fields, fields);
|
||||
});
|
||||
}
|
||||
}
|
||||
var inbox = this.toBeAdded[typename];
|
||||
if (inbox && inbox.length) {
|
||||
// Merge the pending policies into this.typePolicies, in the order they
|
||||
// were originally passed to addTypePolicy.
|
||||
inbox.splice(0).forEach(function (policy) {
|
||||
_this.updateTypePolicy(typename, policy);
|
||||
});
|
||||
}
|
||||
return this.typePolicies[typename];
|
||||
};
|
||||
Policies.prototype.getFieldPolicy = function (typename, fieldName, createIfMissing) {
|
||||
if (typename) {
|
||||
var fieldPolicies = this.getTypePolicy(typename).fields;
|
||||
return (fieldPolicies[fieldName] ||
|
||||
(createIfMissing && (fieldPolicies[fieldName] = Object.create(null))));
|
||||
}
|
||||
};
|
||||
Policies.prototype.getSupertypeSet = function (subtype, createIfMissing) {
|
||||
var supertypeSet = this.supertypeMap.get(subtype);
|
||||
if (!supertypeSet && createIfMissing) {
|
||||
this.supertypeMap.set(subtype, (supertypeSet = new Set()));
|
||||
}
|
||||
return supertypeSet;
|
||||
};
|
||||
Policies.prototype.fragmentMatches = function (fragment, typename, result, variables) {
|
||||
var _this = this;
|
||||
if (!fragment.typeCondition)
|
||||
return true;
|
||||
// If the fragment has a type condition but the object we're matching
|
||||
// against does not have a __typename, the fragment cannot match.
|
||||
if (!typename)
|
||||
return false;
|
||||
var supertype = fragment.typeCondition.name.value;
|
||||
// Common case: fragment type condition and __typename are the same.
|
||||
if (typename === supertype)
|
||||
return true;
|
||||
if (this.usingPossibleTypes && this.supertypeMap.has(supertype)) {
|
||||
var typenameSupertypeSet = this.getSupertypeSet(typename, true);
|
||||
var workQueue_1 = [typenameSupertypeSet];
|
||||
var maybeEnqueue_1 = function (subtype) {
|
||||
var supertypeSet = _this.getSupertypeSet(subtype, false);
|
||||
if (supertypeSet &&
|
||||
supertypeSet.size &&
|
||||
workQueue_1.indexOf(supertypeSet) < 0) {
|
||||
workQueue_1.push(supertypeSet);
|
||||
}
|
||||
};
|
||||
// We need to check fuzzy subtypes only if we encountered fuzzy
|
||||
// subtype strings in addPossibleTypes, and only while writing to
|
||||
// the cache, since that's when selectionSetMatchesResult gives a
|
||||
// strong signal of fragment matching. The StoreReader class calls
|
||||
// policies.fragmentMatches without passing a result object, so
|
||||
// needToCheckFuzzySubtypes is always false while reading.
|
||||
var needToCheckFuzzySubtypes = !!(result && this.fuzzySubtypes.size);
|
||||
var checkingFuzzySubtypes = false;
|
||||
// It's important to keep evaluating workQueue.length each time through
|
||||
// the loop, because the queue can grow while we're iterating over it.
|
||||
for (var i = 0; i < workQueue_1.length; ++i) {
|
||||
var supertypeSet = workQueue_1[i];
|
||||
if (supertypeSet.has(supertype)) {
|
||||
if (!typenameSupertypeSet.has(supertype)) {
|
||||
if (checkingFuzzySubtypes) {
|
||||
globalThis.__DEV__ !== false && invariant.warn(6, typename, supertype);
|
||||
}
|
||||
// Record positive results for faster future lookup.
|
||||
// Unfortunately, we cannot safely cache negative results,
|
||||
// because new possibleTypes data could always be added to the
|
||||
// Policies class.
|
||||
typenameSupertypeSet.add(supertype);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
supertypeSet.forEach(maybeEnqueue_1);
|
||||
if (needToCheckFuzzySubtypes &&
|
||||
// Start checking fuzzy subtypes only after exhausting all
|
||||
// non-fuzzy subtypes (after the final iteration of the loop).
|
||||
i === workQueue_1.length - 1 &&
|
||||
// We could wait to compare fragment.selectionSet to result
|
||||
// after we verify the supertype, but this check is often less
|
||||
// expensive than that search, and we will have to do the
|
||||
// comparison anyway whenever we find a potential match.
|
||||
selectionSetMatchesResult(fragment.selectionSet, result, variables)) {
|
||||
// We don't always need to check fuzzy subtypes (if no result
|
||||
// was provided, or !this.fuzzySubtypes.size), but, when we do,
|
||||
// we only want to check them once.
|
||||
needToCheckFuzzySubtypes = false;
|
||||
checkingFuzzySubtypes = true;
|
||||
// If we find any fuzzy subtypes that match typename, extend the
|
||||
// workQueue to search through the supertypes of those fuzzy
|
||||
// subtypes. Otherwise the for-loop will terminate and we'll
|
||||
// return false below.
|
||||
this.fuzzySubtypes.forEach(function (regExp, fuzzyString) {
|
||||
var match = typename.match(regExp);
|
||||
if (match && match[0] === typename) {
|
||||
maybeEnqueue_1(fuzzyString);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
};
|
||||
Policies.prototype.hasKeyArgs = function (typename, fieldName) {
|
||||
var policy = this.getFieldPolicy(typename, fieldName, false);
|
||||
return !!(policy && policy.keyFn);
|
||||
};
|
||||
Policies.prototype.getStoreFieldName = function (fieldSpec) {
|
||||
var typename = fieldSpec.typename, fieldName = fieldSpec.fieldName;
|
||||
var policy = this.getFieldPolicy(typename, fieldName, false);
|
||||
var storeFieldName;
|
||||
var keyFn = policy && policy.keyFn;
|
||||
if (keyFn && typename) {
|
||||
var context = {
|
||||
typename: typename,
|
||||
fieldName: fieldName,
|
||||
field: fieldSpec.field || null,
|
||||
variables: fieldSpec.variables,
|
||||
};
|
||||
var args = argsFromFieldSpecifier(fieldSpec);
|
||||
while (keyFn) {
|
||||
var specifierOrString = keyFn(args, context);
|
||||
if (isArray(specifierOrString)) {
|
||||
keyFn = keyArgsFnFromSpecifier(specifierOrString);
|
||||
}
|
||||
else {
|
||||
// If the custom keyFn returns a falsy value, fall back to
|
||||
// fieldName instead.
|
||||
storeFieldName = specifierOrString || fieldName;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (storeFieldName === void 0) {
|
||||
storeFieldName =
|
||||
fieldSpec.field ?
|
||||
storeKeyNameFromField(fieldSpec.field, fieldSpec.variables)
|
||||
: getStoreKeyName(fieldName, argsFromFieldSpecifier(fieldSpec));
|
||||
}
|
||||
// Returning false from a keyArgs function is like configuring
|
||||
// keyArgs: false, but more dynamic.
|
||||
if (storeFieldName === false) {
|
||||
return fieldName;
|
||||
}
|
||||
// Make sure custom field names start with the actual field.name.value
|
||||
// of the field, so we can always figure out which properties of a
|
||||
// StoreObject correspond to which original field names.
|
||||
return fieldName === fieldNameFromStoreName(storeFieldName) ? storeFieldName
|
||||
: fieldName + ":" + storeFieldName;
|
||||
};
|
||||
Policies.prototype.readField = function (options, context) {
|
||||
var objectOrReference = options.from;
|
||||
if (!objectOrReference)
|
||||
return;
|
||||
var nameOrField = options.field || options.fieldName;
|
||||
if (!nameOrField)
|
||||
return;
|
||||
if (options.typename === void 0) {
|
||||
var typename = context.store.getFieldValue(objectOrReference, "__typename");
|
||||
if (typename)
|
||||
options.typename = typename;
|
||||
}
|
||||
var storeFieldName = this.getStoreFieldName(options);
|
||||
var fieldName = fieldNameFromStoreName(storeFieldName);
|
||||
var existing = context.store.getFieldValue(objectOrReference, storeFieldName);
|
||||
var policy = this.getFieldPolicy(options.typename, fieldName, false);
|
||||
var read = policy && policy.read;
|
||||
if (read) {
|
||||
var readOptions = makeFieldFunctionOptions(this, objectOrReference, options, context, context.store.getStorage(isReference(objectOrReference) ?
|
||||
objectOrReference.__ref
|
||||
: objectOrReference, storeFieldName));
|
||||
// Call read(existing, readOptions) with cacheSlot holding this.cache.
|
||||
return cacheSlot.withValue(this.cache, read, [
|
||||
existing,
|
||||
readOptions,
|
||||
]);
|
||||
}
|
||||
return existing;
|
||||
};
|
||||
Policies.prototype.getReadFunction = function (typename, fieldName) {
|
||||
var policy = this.getFieldPolicy(typename, fieldName, false);
|
||||
return policy && policy.read;
|
||||
};
|
||||
Policies.prototype.getMergeFunction = function (parentTypename, fieldName, childTypename) {
|
||||
var policy = this.getFieldPolicy(parentTypename, fieldName, false);
|
||||
var merge = policy && policy.merge;
|
||||
if (!merge && childTypename) {
|
||||
policy = this.getTypePolicy(childTypename);
|
||||
merge = policy && policy.merge;
|
||||
}
|
||||
return merge;
|
||||
};
|
||||
Policies.prototype.runMergeFunction = function (existing, incoming, _a, context, storage) {
|
||||
var field = _a.field, typename = _a.typename, merge = _a.merge;
|
||||
if (merge === mergeTrueFn) {
|
||||
// Instead of going to the trouble of creating a full
|
||||
// FieldFunctionOptions object and calling mergeTrueFn, we can
|
||||
// simply call mergeObjects, as mergeTrueFn would.
|
||||
return makeMergeObjectsFunction(context.store)(existing, incoming);
|
||||
}
|
||||
if (merge === mergeFalseFn) {
|
||||
// Likewise for mergeFalseFn, whose implementation is even simpler.
|
||||
return incoming;
|
||||
}
|
||||
// If cache.writeQuery or cache.writeFragment was called with
|
||||
// options.overwrite set to true, we still call merge functions, but
|
||||
// the existing data is always undefined, so the merge function will
|
||||
// not attempt to combine the incoming data with the existing data.
|
||||
if (context.overwrite) {
|
||||
existing = void 0;
|
||||
}
|
||||
return merge(existing, incoming, makeFieldFunctionOptions(this,
|
||||
// Unlike options.readField for read functions, we do not fall
|
||||
// back to the current object if no foreignObjOrRef is provided,
|
||||
// because it's not clear what the current object should be for
|
||||
// merge functions: the (possibly undefined) existing object, or
|
||||
// the incoming object? If you think your merge function needs
|
||||
// to read sibling fields in order to produce a new value for
|
||||
// the current field, you might want to rethink your strategy,
|
||||
// because that's a recipe for making merge behavior sensitive
|
||||
// to the order in which fields are written into the cache.
|
||||
// However, readField(name, ref) is useful for merge functions
|
||||
// that need to deduplicate child objects and references.
|
||||
void 0, {
|
||||
typename: typename,
|
||||
fieldName: field.name.value,
|
||||
field: field,
|
||||
variables: context.variables,
|
||||
}, context, storage || Object.create(null)));
|
||||
};
|
||||
return Policies;
|
||||
}());
|
||||
export { Policies };
|
||||
function makeFieldFunctionOptions(policies, objectOrReference, fieldSpec, context, storage) {
|
||||
var storeFieldName = policies.getStoreFieldName(fieldSpec);
|
||||
var fieldName = fieldNameFromStoreName(storeFieldName);
|
||||
var variables = fieldSpec.variables || context.variables;
|
||||
var _a = context.store, toReference = _a.toReference, canRead = _a.canRead;
|
||||
return {
|
||||
args: argsFromFieldSpecifier(fieldSpec),
|
||||
field: fieldSpec.field || null,
|
||||
fieldName: fieldName,
|
||||
storeFieldName: storeFieldName,
|
||||
variables: variables,
|
||||
isReference: isReference,
|
||||
toReference: toReference,
|
||||
storage: storage,
|
||||
cache: policies.cache,
|
||||
canRead: canRead,
|
||||
readField: function () {
|
||||
return policies.readField(normalizeReadFieldOptions(arguments, objectOrReference, variables), context);
|
||||
},
|
||||
mergeObjects: makeMergeObjectsFunction(context.store),
|
||||
};
|
||||
}
|
||||
export function normalizeReadFieldOptions(readFieldArgs, objectOrReference, variables) {
|
||||
var fieldNameOrOptions = readFieldArgs[0], from = readFieldArgs[1], argc = readFieldArgs.length;
|
||||
var options;
|
||||
if (typeof fieldNameOrOptions === "string") {
|
||||
options = {
|
||||
fieldName: fieldNameOrOptions,
|
||||
// Default to objectOrReference only when no second argument was
|
||||
// passed for the from parameter, not when undefined is explicitly
|
||||
// passed as the second argument.
|
||||
from: argc > 1 ? from : objectOrReference,
|
||||
};
|
||||
}
|
||||
else {
|
||||
options = __assign({}, fieldNameOrOptions);
|
||||
// Default to objectOrReference only when fieldNameOrOptions.from is
|
||||
// actually omitted, rather than just undefined.
|
||||
if (!hasOwn.call(options, "from")) {
|
||||
options.from = objectOrReference;
|
||||
}
|
||||
}
|
||||
if (globalThis.__DEV__ !== false && options.from === void 0) {
|
||||
globalThis.__DEV__ !== false && invariant.warn(7, stringifyForDisplay(Array.from(readFieldArgs)));
|
||||
}
|
||||
if (void 0 === options.variables) {
|
||||
options.variables = variables;
|
||||
}
|
||||
return options;
|
||||
}
|
||||
function makeMergeObjectsFunction(store) {
|
||||
return function mergeObjects(existing, incoming) {
|
||||
if (isArray(existing) || isArray(incoming)) {
|
||||
throw newInvariantError(8);
|
||||
}
|
||||
// These dynamic checks are necessary because the parameters of a
|
||||
// custom merge function can easily have the any type, so the type
|
||||
// system cannot always enforce the StoreObject | Reference parameter
|
||||
// types of options.mergeObjects.
|
||||
if (isNonNullObject(existing) && isNonNullObject(incoming)) {
|
||||
var eType = store.getFieldValue(existing, "__typename");
|
||||
var iType = store.getFieldValue(incoming, "__typename");
|
||||
var typesDiffer = eType && iType && eType !== iType;
|
||||
if (typesDiffer) {
|
||||
return incoming;
|
||||
}
|
||||
if (isReference(existing) && storeValueIsStoreObject(incoming)) {
|
||||
// Update the normalized EntityStore for the entity identified by
|
||||
// existing.__ref, preferring/overwriting any fields contributed by the
|
||||
// newer incoming StoreObject.
|
||||
store.merge(existing.__ref, incoming);
|
||||
return existing;
|
||||
}
|
||||
if (storeValueIsStoreObject(existing) && isReference(incoming)) {
|
||||
// Update the normalized EntityStore for the entity identified by
|
||||
// incoming.__ref, taking fields from the older existing object only if
|
||||
// those fields are not already present in the newer StoreObject
|
||||
// identified by incoming.__ref.
|
||||
store.merge(existing, incoming.__ref);
|
||||
return incoming;
|
||||
}
|
||||
if (storeValueIsStoreObject(existing) &&
|
||||
storeValueIsStoreObject(incoming)) {
|
||||
return __assign(__assign({}, existing), incoming);
|
||||
}
|
||||
}
|
||||
return incoming;
|
||||
};
|
||||
}
|
||||
//# sourceMappingURL=policies.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/policies.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/policies.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
18
graphql-subscription/node_modules/@apollo/client/cache/inmemory/reactiveVars.d.ts
generated
vendored
Normal file
18
graphql-subscription/node_modules/@apollo/client/cache/inmemory/reactiveVars.d.ts
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
import type { ApolloCache } from "../../core/index.js";
|
||||
export interface ReactiveVar<T> {
|
||||
(newValue?: T): T;
|
||||
onNextChange(listener: ReactiveListener<T>): () => void;
|
||||
attachCache(cache: ApolloCache<any>): this;
|
||||
forgetCache(cache: ApolloCache<any>): boolean;
|
||||
}
|
||||
export type ReactiveListener<T> = (value: T) => any;
|
||||
export declare const cacheSlot: {
|
||||
readonly id: string;
|
||||
hasValue(): boolean;
|
||||
getValue(): ApolloCache<any> | undefined;
|
||||
withValue<TResult, TArgs extends any[], TThis = any>(value: ApolloCache<any>, callback: (this: TThis, ...args: TArgs) => TResult, args?: TArgs | undefined, thisArg?: TThis | undefined): TResult;
|
||||
};
|
||||
export declare function forgetCache(cache: ApolloCache<any>): void;
|
||||
export declare function recallCache(cache: ApolloCache<any>): void;
|
||||
export declare function makeVar<T>(value: T): ReactiveVar<T>;
|
||||
//# sourceMappingURL=reactiveVars.d.ts.map
|
||||
83
graphql-subscription/node_modules/@apollo/client/cache/inmemory/reactiveVars.js
generated
vendored
Normal file
83
graphql-subscription/node_modules/@apollo/client/cache/inmemory/reactiveVars.js
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
import { dep, Slot } from "optimism";
|
||||
// Contextual Slot that acquires its value when custom read functions are
|
||||
// called in Policies#readField.
|
||||
export var cacheSlot = new Slot();
|
||||
var cacheInfoMap = new WeakMap();
|
||||
function getCacheInfo(cache) {
|
||||
var info = cacheInfoMap.get(cache);
|
||||
if (!info) {
|
||||
cacheInfoMap.set(cache, (info = {
|
||||
vars: new Set(),
|
||||
dep: dep(),
|
||||
}));
|
||||
}
|
||||
return info;
|
||||
}
|
||||
export function forgetCache(cache) {
|
||||
getCacheInfo(cache).vars.forEach(function (rv) { return rv.forgetCache(cache); });
|
||||
}
|
||||
// Calling forgetCache(cache) serves to silence broadcasts and allows the
|
||||
// cache to be garbage collected. However, the varsByCache WeakMap
|
||||
// preserves the set of reactive variables that were previously associated
|
||||
// with this cache, which makes it possible to "recall" the cache at a
|
||||
// later time, by reattaching it to those variables. If the cache has been
|
||||
// garbage collected in the meantime, because it is no longer reachable,
|
||||
// you won't be able to call recallCache(cache), and the cache will
|
||||
// automatically disappear from the varsByCache WeakMap.
|
||||
export function recallCache(cache) {
|
||||
getCacheInfo(cache).vars.forEach(function (rv) { return rv.attachCache(cache); });
|
||||
}
|
||||
export function makeVar(value) {
|
||||
var caches = new Set();
|
||||
var listeners = new Set();
|
||||
var rv = function (newValue) {
|
||||
if (arguments.length > 0) {
|
||||
if (value !== newValue) {
|
||||
value = newValue;
|
||||
caches.forEach(function (cache) {
|
||||
// Invalidate any fields with custom read functions that
|
||||
// consumed this variable, so query results involving those
|
||||
// fields will be recomputed the next time we read them.
|
||||
getCacheInfo(cache).dep.dirty(rv);
|
||||
// Broadcast changes to any caches that have previously read
|
||||
// from this variable.
|
||||
broadcast(cache);
|
||||
});
|
||||
// Finally, notify any listeners added via rv.onNextChange.
|
||||
var oldListeners = Array.from(listeners);
|
||||
listeners.clear();
|
||||
oldListeners.forEach(function (listener) { return listener(value); });
|
||||
}
|
||||
}
|
||||
else {
|
||||
// When reading from the variable, obtain the current cache from
|
||||
// context via cacheSlot. This isn't entirely foolproof, but it's
|
||||
// the same system that powers varDep.
|
||||
var cache = cacheSlot.getValue();
|
||||
if (cache) {
|
||||
attach(cache);
|
||||
getCacheInfo(cache).dep(rv);
|
||||
}
|
||||
}
|
||||
return value;
|
||||
};
|
||||
rv.onNextChange = function (listener) {
|
||||
listeners.add(listener);
|
||||
return function () {
|
||||
listeners.delete(listener);
|
||||
};
|
||||
};
|
||||
var attach = (rv.attachCache = function (cache) {
|
||||
caches.add(cache);
|
||||
getCacheInfo(cache).vars.add(rv);
|
||||
return rv;
|
||||
});
|
||||
rv.forgetCache = function (cache) { return caches.delete(cache); };
|
||||
return rv;
|
||||
}
|
||||
function broadcast(cache) {
|
||||
if (cache.broadcastWatches) {
|
||||
cache.broadcastWatches();
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=reactiveVars.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/reactiveVars.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/reactiveVars.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
40
graphql-subscription/node_modules/@apollo/client/cache/inmemory/readFromStore.d.ts
generated
vendored
Normal file
40
graphql-subscription/node_modules/@apollo/client/cache/inmemory/readFromStore.d.ts
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
import type { SelectionSetNode } from "graphql";
|
||||
import type { Reference, StoreObject } from "../../utilities/index.js";
|
||||
import type { Cache } from "../core/types/Cache.js";
|
||||
import type { DiffQueryAgainstStoreOptions, InMemoryCacheConfig, ReadMergeModifyContext } from "./types.js";
|
||||
import type { InMemoryCache } from "./inMemoryCache.js";
|
||||
import type { MissingTree } from "../core/types/common.js";
|
||||
import { ObjectCanon } from "./object-canon.js";
|
||||
export type VariableMap = {
|
||||
[name: string]: any;
|
||||
};
|
||||
export type ExecResult<R = any> = {
|
||||
result: R;
|
||||
missing?: MissingTree;
|
||||
};
|
||||
export interface StoreReaderConfig {
|
||||
cache: InMemoryCache;
|
||||
addTypename?: boolean;
|
||||
resultCacheMaxSize?: number;
|
||||
canonizeResults?: boolean;
|
||||
canon?: ObjectCanon;
|
||||
fragments?: InMemoryCacheConfig["fragments"];
|
||||
}
|
||||
export declare class StoreReader {
|
||||
private executeSelectionSet;
|
||||
private executeSubSelectedArray;
|
||||
private config;
|
||||
private knownResults;
|
||||
canon: ObjectCanon;
|
||||
resetCanon(): void;
|
||||
constructor(config: StoreReaderConfig);
|
||||
/**
|
||||
* Given a store and a query, return as much of the result as possible and
|
||||
* identify if any data was missing from the store.
|
||||
*/
|
||||
diffQueryAgainstStore<T>({ store, query, rootId, variables, returnPartialData, canonizeResults, }: DiffQueryAgainstStoreOptions): Cache.DiffResult<T>;
|
||||
isFresh(result: Record<string, any>, parent: StoreObject | Reference, selectionSet: SelectionSetNode, context: ReadMergeModifyContext): boolean;
|
||||
private execSelectionSetImpl;
|
||||
private execSubSelectedArrayImpl;
|
||||
}
|
||||
//# sourceMappingURL=readFromStore.d.ts.map
|
||||
329
graphql-subscription/node_modules/@apollo/client/cache/inmemory/readFromStore.js
generated
vendored
Normal file
329
graphql-subscription/node_modules/@apollo/client/cache/inmemory/readFromStore.js
generated
vendored
Normal file
@@ -0,0 +1,329 @@
|
||||
import { __assign } from "tslib";
|
||||
import { invariant, newInvariantError } from "../../utilities/globals/index.js";
|
||||
import { Kind } from "graphql";
|
||||
import { wrap } from "optimism";
|
||||
import { isField, resultKeyNameFromField, isReference, makeReference, shouldInclude, addTypenameToDocument, getDefaultValues, getMainDefinition, getQueryDefinition, getFragmentFromSelection, maybeDeepFreeze, mergeDeepArray, DeepMerger, isNonNullObject, canUseWeakMap, compact, canonicalStringify, cacheSizes, } from "../../utilities/index.js";
|
||||
import { maybeDependOnExistenceOfEntity, supportsResultCaching, } from "./entityStore.js";
|
||||
import { isArray, extractFragmentContext, getTypenameFromStoreObject, shouldCanonizeResults, } from "./helpers.js";
|
||||
import { MissingFieldError } from "../core/types/common.js";
|
||||
import { ObjectCanon } from "./object-canon.js";
|
||||
function execSelectionSetKeyArgs(options) {
|
||||
return [
|
||||
options.selectionSet,
|
||||
options.objectOrReference,
|
||||
options.context,
|
||||
// We split out this property so we can pass different values
|
||||
// independently without modifying options.context itself.
|
||||
options.context.canonizeResults,
|
||||
];
|
||||
}
|
||||
var StoreReader = /** @class */ (function () {
|
||||
function StoreReader(config) {
|
||||
var _this = this;
|
||||
this.knownResults = new (canUseWeakMap ? WeakMap : Map)();
|
||||
this.config = compact(config, {
|
||||
addTypename: config.addTypename !== false,
|
||||
canonizeResults: shouldCanonizeResults(config),
|
||||
});
|
||||
this.canon = config.canon || new ObjectCanon();
|
||||
// memoized functions in this class will be "garbage-collected"
|
||||
// by recreating the whole `StoreReader` in
|
||||
// `InMemoryCache.resetResultsCache`
|
||||
// (triggered from `InMemoryCache.gc` with `resetResultCache: true`)
|
||||
this.executeSelectionSet = wrap(function (options) {
|
||||
var _a;
|
||||
var canonizeResults = options.context.canonizeResults;
|
||||
var peekArgs = execSelectionSetKeyArgs(options);
|
||||
// Negate this boolean option so we can find out if we've already read
|
||||
// this result using the other boolean value.
|
||||
peekArgs[3] = !canonizeResults;
|
||||
var other = (_a = _this.executeSelectionSet).peek.apply(_a, peekArgs);
|
||||
if (other) {
|
||||
if (canonizeResults) {
|
||||
return __assign(__assign({}, other), {
|
||||
// If we previously read this result without canonizing it, we can
|
||||
// reuse that result simply by canonizing it now.
|
||||
result: _this.canon.admit(other.result) });
|
||||
}
|
||||
// If we previously read this result with canonization enabled, we can
|
||||
// return that canonized result as-is.
|
||||
return other;
|
||||
}
|
||||
maybeDependOnExistenceOfEntity(options.context.store, options.enclosingRef.__ref);
|
||||
// Finally, if we didn't find any useful previous results, run the real
|
||||
// execSelectionSetImpl method with the given options.
|
||||
return _this.execSelectionSetImpl(options);
|
||||
}, {
|
||||
max: this.config.resultCacheMaxSize ||
|
||||
cacheSizes["inMemoryCache.executeSelectionSet"] ||
|
||||
50000 /* defaultCacheSizes["inMemoryCache.executeSelectionSet"] */,
|
||||
keyArgs: execSelectionSetKeyArgs,
|
||||
// Note that the parameters of makeCacheKey are determined by the
|
||||
// array returned by keyArgs.
|
||||
makeCacheKey: function (selectionSet, parent, context, canonizeResults) {
|
||||
if (supportsResultCaching(context.store)) {
|
||||
return context.store.makeCacheKey(selectionSet, isReference(parent) ? parent.__ref : parent, context.varString, canonizeResults);
|
||||
}
|
||||
},
|
||||
});
|
||||
this.executeSubSelectedArray = wrap(function (options) {
|
||||
maybeDependOnExistenceOfEntity(options.context.store, options.enclosingRef.__ref);
|
||||
return _this.execSubSelectedArrayImpl(options);
|
||||
}, {
|
||||
max: this.config.resultCacheMaxSize ||
|
||||
cacheSizes["inMemoryCache.executeSubSelectedArray"] ||
|
||||
10000 /* defaultCacheSizes["inMemoryCache.executeSubSelectedArray"] */,
|
||||
makeCacheKey: function (_a) {
|
||||
var field = _a.field, array = _a.array, context = _a.context;
|
||||
if (supportsResultCaching(context.store)) {
|
||||
return context.store.makeCacheKey(field, array, context.varString);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
StoreReader.prototype.resetCanon = function () {
|
||||
this.canon = new ObjectCanon();
|
||||
};
|
||||
/**
|
||||
* Given a store and a query, return as much of the result as possible and
|
||||
* identify if any data was missing from the store.
|
||||
*/
|
||||
StoreReader.prototype.diffQueryAgainstStore = function (_a) {
|
||||
var store = _a.store, query = _a.query, _b = _a.rootId, rootId = _b === void 0 ? "ROOT_QUERY" : _b, variables = _a.variables, _c = _a.returnPartialData, returnPartialData = _c === void 0 ? true : _c, _d = _a.canonizeResults, canonizeResults = _d === void 0 ? this.config.canonizeResults : _d;
|
||||
var policies = this.config.cache.policies;
|
||||
variables = __assign(__assign({}, getDefaultValues(getQueryDefinition(query))), variables);
|
||||
var rootRef = makeReference(rootId);
|
||||
var execResult = this.executeSelectionSet({
|
||||
selectionSet: getMainDefinition(query).selectionSet,
|
||||
objectOrReference: rootRef,
|
||||
enclosingRef: rootRef,
|
||||
context: __assign({ store: store, query: query, policies: policies, variables: variables, varString: canonicalStringify(variables), canonizeResults: canonizeResults }, extractFragmentContext(query, this.config.fragments)),
|
||||
});
|
||||
var missing;
|
||||
if (execResult.missing) {
|
||||
// For backwards compatibility we still report an array of
|
||||
// MissingFieldError objects, even though there will only ever be at most
|
||||
// one of them, now that all missing field error messages are grouped
|
||||
// together in the execResult.missing tree.
|
||||
missing = [
|
||||
new MissingFieldError(firstMissing(execResult.missing), execResult.missing, query, variables),
|
||||
];
|
||||
if (!returnPartialData) {
|
||||
throw missing[0];
|
||||
}
|
||||
}
|
||||
return {
|
||||
result: execResult.result,
|
||||
complete: !missing,
|
||||
missing: missing,
|
||||
};
|
||||
};
|
||||
StoreReader.prototype.isFresh = function (result, parent, selectionSet, context) {
|
||||
if (supportsResultCaching(context.store) &&
|
||||
this.knownResults.get(result) === selectionSet) {
|
||||
var latest = this.executeSelectionSet.peek(selectionSet, parent, context,
|
||||
// If result is canonical, then it could only have been previously
|
||||
// cached by the canonizing version of executeSelectionSet, so we can
|
||||
// avoid checking both possibilities here.
|
||||
this.canon.isKnown(result));
|
||||
if (latest && result === latest.result) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
};
|
||||
// Uncached version of executeSelectionSet.
|
||||
StoreReader.prototype.execSelectionSetImpl = function (_a) {
|
||||
var _this = this;
|
||||
var selectionSet = _a.selectionSet, objectOrReference = _a.objectOrReference, enclosingRef = _a.enclosingRef, context = _a.context;
|
||||
if (isReference(objectOrReference) &&
|
||||
!context.policies.rootTypenamesById[objectOrReference.__ref] &&
|
||||
!context.store.has(objectOrReference.__ref)) {
|
||||
return {
|
||||
result: this.canon.empty,
|
||||
missing: "Dangling reference to missing ".concat(objectOrReference.__ref, " object"),
|
||||
};
|
||||
}
|
||||
var variables = context.variables, policies = context.policies, store = context.store;
|
||||
var typename = store.getFieldValue(objectOrReference, "__typename");
|
||||
var objectsToMerge = [];
|
||||
var missing;
|
||||
var missingMerger = new DeepMerger();
|
||||
if (this.config.addTypename &&
|
||||
typeof typename === "string" &&
|
||||
!policies.rootIdsByTypename[typename]) {
|
||||
// Ensure we always include a default value for the __typename
|
||||
// field, if we have one, and this.config.addTypename is true. Note
|
||||
// that this field can be overridden by other merged objects.
|
||||
objectsToMerge.push({ __typename: typename });
|
||||
}
|
||||
function handleMissing(result, resultName) {
|
||||
var _a;
|
||||
if (result.missing) {
|
||||
missing = missingMerger.merge(missing, (_a = {},
|
||||
_a[resultName] = result.missing,
|
||||
_a));
|
||||
}
|
||||
return result.result;
|
||||
}
|
||||
var workSet = new Set(selectionSet.selections);
|
||||
workSet.forEach(function (selection) {
|
||||
var _a, _b;
|
||||
// Omit fields with directives @skip(if: <truthy value>) or
|
||||
// @include(if: <falsy value>).
|
||||
if (!shouldInclude(selection, variables))
|
||||
return;
|
||||
if (isField(selection)) {
|
||||
var fieldValue = policies.readField({
|
||||
fieldName: selection.name.value,
|
||||
field: selection,
|
||||
variables: context.variables,
|
||||
from: objectOrReference,
|
||||
}, context);
|
||||
var resultName = resultKeyNameFromField(selection);
|
||||
if (fieldValue === void 0) {
|
||||
if (!addTypenameToDocument.added(selection)) {
|
||||
missing = missingMerger.merge(missing, (_a = {},
|
||||
_a[resultName] = "Can't find field '".concat(selection.name.value, "' on ").concat(isReference(objectOrReference) ?
|
||||
objectOrReference.__ref + " object"
|
||||
: "object " + JSON.stringify(objectOrReference, null, 2)),
|
||||
_a));
|
||||
}
|
||||
}
|
||||
else if (isArray(fieldValue)) {
|
||||
fieldValue = handleMissing(_this.executeSubSelectedArray({
|
||||
field: selection,
|
||||
array: fieldValue,
|
||||
enclosingRef: enclosingRef,
|
||||
context: context,
|
||||
}), resultName);
|
||||
}
|
||||
else if (!selection.selectionSet) {
|
||||
// If the field does not have a selection set, then we handle it
|
||||
// as a scalar value. To keep this.canon from canonicalizing
|
||||
// this value, we use this.canon.pass to wrap fieldValue in a
|
||||
// Pass object that this.canon.admit will later unwrap as-is.
|
||||
if (context.canonizeResults) {
|
||||
fieldValue = _this.canon.pass(fieldValue);
|
||||
}
|
||||
}
|
||||
else if (fieldValue != null) {
|
||||
// In this case, because we know the field has a selection set,
|
||||
// it must be trying to query a GraphQLObjectType, which is why
|
||||
// fieldValue must be != null.
|
||||
fieldValue = handleMissing(_this.executeSelectionSet({
|
||||
selectionSet: selection.selectionSet,
|
||||
objectOrReference: fieldValue,
|
||||
enclosingRef: isReference(fieldValue) ? fieldValue : enclosingRef,
|
||||
context: context,
|
||||
}), resultName);
|
||||
}
|
||||
if (fieldValue !== void 0) {
|
||||
objectsToMerge.push((_b = {}, _b[resultName] = fieldValue, _b));
|
||||
}
|
||||
}
|
||||
else {
|
||||
var fragment = getFragmentFromSelection(selection, context.lookupFragment);
|
||||
if (!fragment && selection.kind === Kind.FRAGMENT_SPREAD) {
|
||||
throw newInvariantError(9, selection.name.value);
|
||||
}
|
||||
if (fragment && policies.fragmentMatches(fragment, typename)) {
|
||||
fragment.selectionSet.selections.forEach(workSet.add, workSet);
|
||||
}
|
||||
}
|
||||
});
|
||||
var result = mergeDeepArray(objectsToMerge);
|
||||
var finalResult = { result: result, missing: missing };
|
||||
var frozen = context.canonizeResults ?
|
||||
this.canon.admit(finalResult)
|
||||
// Since this.canon is normally responsible for freezing results (only in
|
||||
// development), freeze them manually if canonization is disabled.
|
||||
: maybeDeepFreeze(finalResult);
|
||||
// Store this result with its selection set so that we can quickly
|
||||
// recognize it again in the StoreReader#isFresh method.
|
||||
if (frozen.result) {
|
||||
this.knownResults.set(frozen.result, selectionSet);
|
||||
}
|
||||
return frozen;
|
||||
};
|
||||
// Uncached version of executeSubSelectedArray.
|
||||
StoreReader.prototype.execSubSelectedArrayImpl = function (_a) {
|
||||
var _this = this;
|
||||
var field = _a.field, array = _a.array, enclosingRef = _a.enclosingRef, context = _a.context;
|
||||
var missing;
|
||||
var missingMerger = new DeepMerger();
|
||||
function handleMissing(childResult, i) {
|
||||
var _a;
|
||||
if (childResult.missing) {
|
||||
missing = missingMerger.merge(missing, (_a = {}, _a[i] = childResult.missing, _a));
|
||||
}
|
||||
return childResult.result;
|
||||
}
|
||||
if (field.selectionSet) {
|
||||
array = array.filter(context.store.canRead);
|
||||
}
|
||||
array = array.map(function (item, i) {
|
||||
// null value in array
|
||||
if (item === null) {
|
||||
return null;
|
||||
}
|
||||
// This is a nested array, recurse
|
||||
if (isArray(item)) {
|
||||
return handleMissing(_this.executeSubSelectedArray({
|
||||
field: field,
|
||||
array: item,
|
||||
enclosingRef: enclosingRef,
|
||||
context: context,
|
||||
}), i);
|
||||
}
|
||||
// This is an object, run the selection set on it
|
||||
if (field.selectionSet) {
|
||||
return handleMissing(_this.executeSelectionSet({
|
||||
selectionSet: field.selectionSet,
|
||||
objectOrReference: item,
|
||||
enclosingRef: isReference(item) ? item : enclosingRef,
|
||||
context: context,
|
||||
}), i);
|
||||
}
|
||||
if (globalThis.__DEV__ !== false) {
|
||||
assertSelectionSetForIdValue(context.store, field, item);
|
||||
}
|
||||
return item;
|
||||
});
|
||||
return {
|
||||
result: context.canonizeResults ? this.canon.admit(array) : array,
|
||||
missing: missing,
|
||||
};
|
||||
};
|
||||
return StoreReader;
|
||||
}());
|
||||
export { StoreReader };
|
||||
function firstMissing(tree) {
|
||||
try {
|
||||
JSON.stringify(tree, function (_, value) {
|
||||
if (typeof value === "string")
|
||||
throw value;
|
||||
return value;
|
||||
});
|
||||
}
|
||||
catch (result) {
|
||||
return result;
|
||||
}
|
||||
}
|
||||
function assertSelectionSetForIdValue(store, field, fieldValue) {
|
||||
if (!field.selectionSet) {
|
||||
var workSet_1 = new Set([fieldValue]);
|
||||
workSet_1.forEach(function (value) {
|
||||
if (isNonNullObject(value)) {
|
||||
invariant(
|
||||
!isReference(value),
|
||||
10,
|
||||
getTypenameFromStoreObject(store, value),
|
||||
field.name.value
|
||||
);
|
||||
Object.values(value).forEach(workSet_1.add, workSet_1);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=readFromStore.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/readFromStore.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/readFromStore.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
126
graphql-subscription/node_modules/@apollo/client/cache/inmemory/types.d.ts
generated
vendored
Normal file
126
graphql-subscription/node_modules/@apollo/client/cache/inmemory/types.d.ts
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
import type { DocumentNode, FieldNode } from "graphql";
|
||||
import type { Transaction } from "../core/cache.js";
|
||||
import type { StoreObject, StoreValue, Reference } from "../../utilities/index.js";
|
||||
import type { FieldValueGetter } from "./entityStore.js";
|
||||
import type { TypePolicies, PossibleTypesMap, KeyFieldsFunction, StorageType, FieldMergeFunction } from "./policies.js";
|
||||
import type { Modifiers, ToReferenceFunction, CanReadFunction, AllFieldsModifier } from "../core/types/common.js";
|
||||
import type { FragmentRegistryAPI } from "./fragmentRegistry.js";
|
||||
export type { StoreObject, StoreValue, Reference };
|
||||
export interface IdGetterObj extends Object {
|
||||
__typename?: string;
|
||||
id?: string;
|
||||
_id?: string;
|
||||
}
|
||||
export declare type IdGetter = (value: IdGetterObj) => string | undefined;
|
||||
/**
|
||||
* This is an interface used to access, set and remove
|
||||
* StoreObjects from the cache
|
||||
*/
|
||||
export interface NormalizedCache {
|
||||
has(dataId: string): boolean;
|
||||
get(dataId: string, fieldName: string): StoreValue;
|
||||
merge(olderId: string, newerObject: StoreObject): void;
|
||||
merge(olderObject: StoreObject, newerId: string): void;
|
||||
modify<Entity extends Record<string, any>>(dataId: string, fields: Modifiers<Entity> | AllFieldsModifier<Entity>): boolean;
|
||||
delete(dataId: string, fieldName?: string): boolean;
|
||||
clear(): void;
|
||||
/**
|
||||
* returns an Object with key-value pairs matching the contents of the store
|
||||
*/
|
||||
toObject(): NormalizedCacheObject;
|
||||
/**
|
||||
* replace the state of the store
|
||||
*/
|
||||
replace(newData: NormalizedCacheObject): void;
|
||||
/**
|
||||
* Retain (or release) a given root ID to protect (or expose) it and its
|
||||
* transitive child entities from (or to) garbage collection. The current
|
||||
* retainment count is returned by both methods. Note that releasing a root
|
||||
* ID does not cause that entity to be garbage collected, but merely removes
|
||||
* it from the set of root IDs that will be considered during the next
|
||||
* mark-and-sweep collection.
|
||||
*/
|
||||
retain(rootId: string): number;
|
||||
release(rootId: string): number;
|
||||
getFieldValue: FieldValueGetter;
|
||||
toReference: ToReferenceFunction;
|
||||
canRead: CanReadFunction;
|
||||
getStorage(idOrObj: string | StoreObject, ...storeFieldNames: (string | number)[]): StorageType;
|
||||
}
|
||||
/**
|
||||
* This is a normalized representation of the Apollo query result cache. It consists of
|
||||
* a flattened representation of query result trees.
|
||||
*/
|
||||
export interface NormalizedCacheObject {
|
||||
__META?: {
|
||||
extraRootIds: string[];
|
||||
};
|
||||
[dataId: string]: StoreObject | undefined;
|
||||
}
|
||||
export type OptimisticStoreItem = {
|
||||
id: string;
|
||||
data: NormalizedCacheObject;
|
||||
transaction: Transaction<NormalizedCacheObject>;
|
||||
};
|
||||
export type ReadQueryOptions = {
|
||||
/**
|
||||
* The Apollo Client store object.
|
||||
*/
|
||||
store: NormalizedCache;
|
||||
/**
|
||||
* A parsed GraphQL query document.
|
||||
*/
|
||||
query: DocumentNode;
|
||||
variables?: Object;
|
||||
previousResult?: any;
|
||||
/**
|
||||
* @deprecated
|
||||
* Using `canonizeResults` can result in memory leaks so we generally do not
|
||||
* recommend using this option anymore.
|
||||
* A future version of Apollo Client will contain a similar feature without
|
||||
* the risk of memory leaks.
|
||||
*/
|
||||
canonizeResults?: boolean;
|
||||
rootId?: string;
|
||||
config?: ApolloReducerConfig;
|
||||
};
|
||||
export type DiffQueryAgainstStoreOptions = ReadQueryOptions & {
|
||||
returnPartialData?: boolean;
|
||||
};
|
||||
export type ApolloReducerConfig = {
|
||||
dataIdFromObject?: KeyFieldsFunction;
|
||||
addTypename?: boolean;
|
||||
};
|
||||
export interface InMemoryCacheConfig extends ApolloReducerConfig {
|
||||
resultCaching?: boolean;
|
||||
possibleTypes?: PossibleTypesMap;
|
||||
typePolicies?: TypePolicies;
|
||||
/**
|
||||
* @deprecated
|
||||
* Please use `cacheSizes` instead.
|
||||
*/
|
||||
resultCacheMaxSize?: number;
|
||||
/**
|
||||
* @deprecated
|
||||
* Using `canonizeResults` can result in memory leaks so we generally do not
|
||||
* recommend using this option anymore.
|
||||
* A future version of Apollo Client will contain a similar feature.
|
||||
*/
|
||||
canonizeResults?: boolean;
|
||||
fragments?: FragmentRegistryAPI;
|
||||
}
|
||||
export interface MergeInfo {
|
||||
field: FieldNode;
|
||||
typename: string | undefined;
|
||||
merge: FieldMergeFunction;
|
||||
}
|
||||
export interface MergeTree {
|
||||
info?: MergeInfo;
|
||||
map: Map<string | number, MergeTree>;
|
||||
}
|
||||
export interface ReadMergeModifyContext {
|
||||
store: NormalizedCache;
|
||||
variables?: Record<string, any>;
|
||||
varString?: string;
|
||||
}
|
||||
//# sourceMappingURL=types.d.ts.map
|
||||
2
graphql-subscription/node_modules/@apollo/client/cache/inmemory/types.js
generated
vendored
Normal file
2
graphql-subscription/node_modules/@apollo/client/cache/inmemory/types.js
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
export {};
|
||||
//# sourceMappingURL=types.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/types.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/types.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
37
graphql-subscription/node_modules/@apollo/client/cache/inmemory/writeToStore.d.ts
generated
vendored
Normal file
37
graphql-subscription/node_modules/@apollo/client/cache/inmemory/writeToStore.d.ts
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
import type { SelectionSetNode, FieldNode } from "graphql";
|
||||
import type { FragmentMap, FragmentMapFunction, StoreObject, Reference } from "../../utilities/index.js";
|
||||
import type { NormalizedCache, ReadMergeModifyContext, MergeTree } from "./types.js";
|
||||
import type { StoreReader } from "./readFromStore.js";
|
||||
import type { InMemoryCache } from "./inMemoryCache.js";
|
||||
import type { Cache } from "../../core/index.js";
|
||||
export interface WriteContext extends ReadMergeModifyContext {
|
||||
readonly written: {
|
||||
[dataId: string]: SelectionSetNode[];
|
||||
};
|
||||
readonly fragmentMap: FragmentMap;
|
||||
lookupFragment: FragmentMapFunction;
|
||||
merge<T>(existing: T, incoming: T): T;
|
||||
overwrite: boolean;
|
||||
incomingById: Map<string, {
|
||||
storeObject: StoreObject;
|
||||
mergeTree?: MergeTree;
|
||||
fieldNodeSet: Set<FieldNode>;
|
||||
}>;
|
||||
clientOnly: boolean;
|
||||
deferred: boolean;
|
||||
flavors: Map<string, FlavorableWriteContext>;
|
||||
}
|
||||
type FlavorableWriteContext = Pick<WriteContext, "clientOnly" | "deferred" | "flavors">;
|
||||
export declare class StoreWriter {
|
||||
readonly cache: InMemoryCache;
|
||||
private reader?;
|
||||
private fragments?;
|
||||
constructor(cache: InMemoryCache, reader?: StoreReader | undefined, fragments?: import("./fragmentRegistry.js").FragmentRegistryAPI | undefined);
|
||||
writeToStore(store: NormalizedCache, { query, result, dataId, variables, overwrite }: Cache.WriteOptions): Reference | undefined;
|
||||
private processSelectionSet;
|
||||
private processFieldValue;
|
||||
private flattenFields;
|
||||
private applyMerges;
|
||||
}
|
||||
export {};
|
||||
//# sourceMappingURL=writeToStore.d.ts.map
|
||||
529
graphql-subscription/node_modules/@apollo/client/cache/inmemory/writeToStore.js
generated
vendored
Normal file
529
graphql-subscription/node_modules/@apollo/client/cache/inmemory/writeToStore.js
generated
vendored
Normal file
@@ -0,0 +1,529 @@
|
||||
import { __assign } from "tslib";
|
||||
import { invariant, newInvariantError } from "../../utilities/globals/index.js";
|
||||
import { equal } from "@wry/equality";
|
||||
import { Trie } from "@wry/trie";
|
||||
import { Kind } from "graphql";
|
||||
import { getFragmentFromSelection, getDefaultValues, getOperationDefinition, getTypenameFromResult, makeReference, isField, resultKeyNameFromField, isReference, shouldInclude, cloneDeep, addTypenameToDocument, isNonEmptyArray, argumentsObjectFromField, canonicalStringify, } from "../../utilities/index.js";
|
||||
import { isArray, makeProcessedFieldsMerger, fieldNameFromStoreName, storeValueIsStoreObject, extractFragmentContext, } from "./helpers.js";
|
||||
import { normalizeReadFieldOptions } from "./policies.js";
|
||||
// Since there are only four possible combinations of context.clientOnly and
|
||||
// context.deferred values, we should need at most four "flavors" of any given
|
||||
// WriteContext. To avoid creating multiple copies of the same context, we cache
|
||||
// the contexts in the context.flavors Map (shared by all flavors) according to
|
||||
// their clientOnly and deferred values (always in that order).
|
||||
function getContextFlavor(context, clientOnly, deferred) {
|
||||
var key = "".concat(clientOnly).concat(deferred);
|
||||
var flavored = context.flavors.get(key);
|
||||
if (!flavored) {
|
||||
context.flavors.set(key, (flavored =
|
||||
context.clientOnly === clientOnly && context.deferred === deferred ?
|
||||
context
|
||||
: __assign(__assign({}, context), { clientOnly: clientOnly, deferred: deferred })));
|
||||
}
|
||||
return flavored;
|
||||
}
|
||||
var StoreWriter = /** @class */ (function () {
|
||||
function StoreWriter(cache, reader, fragments) {
|
||||
this.cache = cache;
|
||||
this.reader = reader;
|
||||
this.fragments = fragments;
|
||||
}
|
||||
StoreWriter.prototype.writeToStore = function (store, _a) {
|
||||
var _this = this;
|
||||
var query = _a.query, result = _a.result, dataId = _a.dataId, variables = _a.variables, overwrite = _a.overwrite;
|
||||
var operationDefinition = getOperationDefinition(query);
|
||||
var merger = makeProcessedFieldsMerger();
|
||||
variables = __assign(__assign({}, getDefaultValues(operationDefinition)), variables);
|
||||
var context = __assign(__assign({ store: store, written: Object.create(null), merge: function (existing, incoming) {
|
||||
return merger.merge(existing, incoming);
|
||||
}, variables: variables, varString: canonicalStringify(variables) }, extractFragmentContext(query, this.fragments)), { overwrite: !!overwrite, incomingById: new Map(), clientOnly: false, deferred: false, flavors: new Map() });
|
||||
var ref = this.processSelectionSet({
|
||||
result: result || Object.create(null),
|
||||
dataId: dataId,
|
||||
selectionSet: operationDefinition.selectionSet,
|
||||
mergeTree: { map: new Map() },
|
||||
context: context,
|
||||
});
|
||||
if (!isReference(ref)) {
|
||||
throw newInvariantError(11, result);
|
||||
}
|
||||
// So far, the store has not been modified, so now it's time to process
|
||||
// context.incomingById and merge those incoming fields into context.store.
|
||||
context.incomingById.forEach(function (_a, dataId) {
|
||||
var storeObject = _a.storeObject, mergeTree = _a.mergeTree, fieldNodeSet = _a.fieldNodeSet;
|
||||
var entityRef = makeReference(dataId);
|
||||
if (mergeTree && mergeTree.map.size) {
|
||||
var applied = _this.applyMerges(mergeTree, entityRef, storeObject, context);
|
||||
if (isReference(applied)) {
|
||||
// Assume References returned by applyMerges have already been merged
|
||||
// into the store. See makeMergeObjectsFunction in policies.ts for an
|
||||
// example of how this can happen.
|
||||
return;
|
||||
}
|
||||
// Otherwise, applyMerges returned a StoreObject, whose fields we should
|
||||
// merge into the store (see store.merge statement below).
|
||||
storeObject = applied;
|
||||
}
|
||||
if (globalThis.__DEV__ !== false && !context.overwrite) {
|
||||
var fieldsWithSelectionSets_1 = Object.create(null);
|
||||
fieldNodeSet.forEach(function (field) {
|
||||
if (field.selectionSet) {
|
||||
fieldsWithSelectionSets_1[field.name.value] = true;
|
||||
}
|
||||
});
|
||||
var hasSelectionSet_1 = function (storeFieldName) {
|
||||
return fieldsWithSelectionSets_1[fieldNameFromStoreName(storeFieldName)] ===
|
||||
true;
|
||||
};
|
||||
var hasMergeFunction_1 = function (storeFieldName) {
|
||||
var childTree = mergeTree && mergeTree.map.get(storeFieldName);
|
||||
return Boolean(childTree && childTree.info && childTree.info.merge);
|
||||
};
|
||||
Object.keys(storeObject).forEach(function (storeFieldName) {
|
||||
// If a merge function was defined for this field, trust that it
|
||||
// did the right thing about (not) clobbering data. If the field
|
||||
// has no selection set, it's a scalar field, so it doesn't need
|
||||
// a merge function (even if it's an object, like JSON data).
|
||||
if (hasSelectionSet_1(storeFieldName) &&
|
||||
!hasMergeFunction_1(storeFieldName)) {
|
||||
warnAboutDataLoss(entityRef, storeObject, storeFieldName, context.store);
|
||||
}
|
||||
});
|
||||
}
|
||||
store.merge(dataId, storeObject);
|
||||
});
|
||||
// Any IDs written explicitly to the cache will be retained as
|
||||
// reachable root IDs for garbage collection purposes. Although this
|
||||
// logic includes root IDs like ROOT_QUERY and ROOT_MUTATION, their
|
||||
// retainment counts are effectively ignored because cache.gc() always
|
||||
// includes them in its root ID set.
|
||||
store.retain(ref.__ref);
|
||||
return ref;
|
||||
};
|
||||
StoreWriter.prototype.processSelectionSet = function (_a) {
|
||||
var _this = this;
|
||||
var dataId = _a.dataId, result = _a.result, selectionSet = _a.selectionSet, context = _a.context,
|
||||
// This object allows processSelectionSet to report useful information
|
||||
// to its callers without explicitly returning that information.
|
||||
mergeTree = _a.mergeTree;
|
||||
var policies = this.cache.policies;
|
||||
// This variable will be repeatedly updated using context.merge to
|
||||
// accumulate all fields that need to be written into the store.
|
||||
var incoming = Object.create(null);
|
||||
// If typename was not passed in, infer it. Note that typename is
|
||||
// always passed in for tricky-to-infer cases such as "Query" for
|
||||
// ROOT_QUERY.
|
||||
var typename = (dataId && policies.rootTypenamesById[dataId]) ||
|
||||
getTypenameFromResult(result, selectionSet, context.fragmentMap) ||
|
||||
(dataId && context.store.get(dataId, "__typename"));
|
||||
if ("string" === typeof typename) {
|
||||
incoming.__typename = typename;
|
||||
}
|
||||
// This readField function will be passed as context.readField in the
|
||||
// KeyFieldsContext object created within policies.identify (called below).
|
||||
// In addition to reading from the existing context.store (thanks to the
|
||||
// policies.readField(options, context) line at the very bottom), this
|
||||
// version of readField can read from Reference objects that are currently
|
||||
// pending in context.incomingById, which is important whenever keyFields
|
||||
// need to be extracted from a child object that processSelectionSet has
|
||||
// turned into a Reference.
|
||||
var readField = function () {
|
||||
var options = normalizeReadFieldOptions(arguments, incoming, context.variables);
|
||||
if (isReference(options.from)) {
|
||||
var info = context.incomingById.get(options.from.__ref);
|
||||
if (info) {
|
||||
var result_1 = policies.readField(__assign(__assign({}, options), { from: info.storeObject }), context);
|
||||
if (result_1 !== void 0) {
|
||||
return result_1;
|
||||
}
|
||||
}
|
||||
}
|
||||
return policies.readField(options, context);
|
||||
};
|
||||
var fieldNodeSet = new Set();
|
||||
this.flattenFields(selectionSet, result,
|
||||
// This WriteContext will be the default context value for fields returned
|
||||
// by the flattenFields method, but some fields may be assigned a modified
|
||||
// context, depending on the presence of @client and other directives.
|
||||
context, typename).forEach(function (context, field) {
|
||||
var _a;
|
||||
var resultFieldKey = resultKeyNameFromField(field);
|
||||
var value = result[resultFieldKey];
|
||||
fieldNodeSet.add(field);
|
||||
if (value !== void 0) {
|
||||
var storeFieldName = policies.getStoreFieldName({
|
||||
typename: typename,
|
||||
fieldName: field.name.value,
|
||||
field: field,
|
||||
variables: context.variables,
|
||||
});
|
||||
var childTree = getChildMergeTree(mergeTree, storeFieldName);
|
||||
var incomingValue = _this.processFieldValue(value, field,
|
||||
// Reset context.clientOnly and context.deferred to their default
|
||||
// values before processing nested selection sets.
|
||||
field.selectionSet ?
|
||||
getContextFlavor(context, false, false)
|
||||
: context, childTree);
|
||||
// To determine if this field holds a child object with a merge function
|
||||
// defined in its type policy (see PR #7070), we need to figure out the
|
||||
// child object's __typename.
|
||||
var childTypename = void 0;
|
||||
// The field's value can be an object that has a __typename only if the
|
||||
// field has a selection set. Otherwise incomingValue is scalar.
|
||||
if (field.selectionSet &&
|
||||
(isReference(incomingValue) || storeValueIsStoreObject(incomingValue))) {
|
||||
childTypename = readField("__typename", incomingValue);
|
||||
}
|
||||
var merge = policies.getMergeFunction(typename, field.name.value, childTypename);
|
||||
if (merge) {
|
||||
childTree.info = {
|
||||
// TODO Check compatibility against any existing childTree.field?
|
||||
field: field,
|
||||
typename: typename,
|
||||
merge: merge,
|
||||
};
|
||||
}
|
||||
else {
|
||||
maybeRecycleChildMergeTree(mergeTree, storeFieldName);
|
||||
}
|
||||
incoming = context.merge(incoming, (_a = {},
|
||||
_a[storeFieldName] = incomingValue,
|
||||
_a));
|
||||
}
|
||||
else if (globalThis.__DEV__ !== false &&
|
||||
!context.clientOnly &&
|
||||
!context.deferred &&
|
||||
!addTypenameToDocument.added(field) &&
|
||||
// If the field has a read function, it may be a synthetic field or
|
||||
// provide a default value, so its absence from the written data should
|
||||
// not be cause for alarm.
|
||||
!policies.getReadFunction(typename, field.name.value)) {
|
||||
globalThis.__DEV__ !== false && invariant.error(12, resultKeyNameFromField(field), result);
|
||||
}
|
||||
});
|
||||
// Identify the result object, even if dataId was already provided,
|
||||
// since we always need keyObject below.
|
||||
try {
|
||||
var _b = policies.identify(result, {
|
||||
typename: typename,
|
||||
selectionSet: selectionSet,
|
||||
fragmentMap: context.fragmentMap,
|
||||
storeObject: incoming,
|
||||
readField: readField,
|
||||
}), id = _b[0], keyObject = _b[1];
|
||||
// If dataId was not provided, fall back to the id just generated by
|
||||
// policies.identify.
|
||||
dataId = dataId || id;
|
||||
// Write any key fields that were used during identification, even if
|
||||
// they were not mentioned in the original query.
|
||||
if (keyObject) {
|
||||
// TODO Reverse the order of the arguments?
|
||||
incoming = context.merge(incoming, keyObject);
|
||||
}
|
||||
}
|
||||
catch (e) {
|
||||
// If dataId was provided, tolerate failure of policies.identify.
|
||||
if (!dataId)
|
||||
throw e;
|
||||
}
|
||||
if ("string" === typeof dataId) {
|
||||
var dataRef = makeReference(dataId);
|
||||
// Avoid processing the same entity object using the same selection
|
||||
// set more than once. We use an array instead of a Set since most
|
||||
// entity IDs will be written using only one selection set, so the
|
||||
// size of this array is likely to be very small, meaning indexOf is
|
||||
// likely to be faster than Set.prototype.has.
|
||||
var sets = context.written[dataId] || (context.written[dataId] = []);
|
||||
if (sets.indexOf(selectionSet) >= 0)
|
||||
return dataRef;
|
||||
sets.push(selectionSet);
|
||||
// If we're about to write a result object into the store, but we
|
||||
// happen to know that the exact same (===) result object would be
|
||||
// returned if we were to reread the result with the same inputs,
|
||||
// then we can skip the rest of the processSelectionSet work for
|
||||
// this object, and immediately return a Reference to it.
|
||||
if (this.reader &&
|
||||
this.reader.isFresh(result, dataRef, selectionSet, context)) {
|
||||
return dataRef;
|
||||
}
|
||||
var previous_1 = context.incomingById.get(dataId);
|
||||
if (previous_1) {
|
||||
previous_1.storeObject = context.merge(previous_1.storeObject, incoming);
|
||||
previous_1.mergeTree = mergeMergeTrees(previous_1.mergeTree, mergeTree);
|
||||
fieldNodeSet.forEach(function (field) { return previous_1.fieldNodeSet.add(field); });
|
||||
}
|
||||
else {
|
||||
context.incomingById.set(dataId, {
|
||||
storeObject: incoming,
|
||||
// Save a reference to mergeTree only if it is not empty, because
|
||||
// empty MergeTrees may be recycled by maybeRecycleChildMergeTree and
|
||||
// reused for entirely different parts of the result tree.
|
||||
mergeTree: mergeTreeIsEmpty(mergeTree) ? void 0 : mergeTree,
|
||||
fieldNodeSet: fieldNodeSet,
|
||||
});
|
||||
}
|
||||
return dataRef;
|
||||
}
|
||||
return incoming;
|
||||
};
|
||||
StoreWriter.prototype.processFieldValue = function (value, field, context, mergeTree) {
|
||||
var _this = this;
|
||||
if (!field.selectionSet || value === null) {
|
||||
// In development, we need to clone scalar values so that they can be
|
||||
// safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
|
||||
// it's cheaper to store the scalar values directly in the cache.
|
||||
return globalThis.__DEV__ !== false ? cloneDeep(value) : value;
|
||||
}
|
||||
if (isArray(value)) {
|
||||
return value.map(function (item, i) {
|
||||
var value = _this.processFieldValue(item, field, context, getChildMergeTree(mergeTree, i));
|
||||
maybeRecycleChildMergeTree(mergeTree, i);
|
||||
return value;
|
||||
});
|
||||
}
|
||||
return this.processSelectionSet({
|
||||
result: value,
|
||||
selectionSet: field.selectionSet,
|
||||
context: context,
|
||||
mergeTree: mergeTree,
|
||||
});
|
||||
};
|
||||
// Implements https://spec.graphql.org/draft/#sec-Field-Collection, but with
|
||||
// some additions for tracking @client and @defer directives.
|
||||
StoreWriter.prototype.flattenFields = function (selectionSet, result, context, typename) {
|
||||
if (typename === void 0) { typename = getTypenameFromResult(result, selectionSet, context.fragmentMap); }
|
||||
var fieldMap = new Map();
|
||||
var policies = this.cache.policies;
|
||||
var limitingTrie = new Trie(false); // No need for WeakMap, since limitingTrie does not escape.
|
||||
(function flatten(selectionSet, inheritedContext) {
|
||||
var visitedNode = limitingTrie.lookup(selectionSet,
|
||||
// Because we take inheritedClientOnly and inheritedDeferred into
|
||||
// consideration here (in addition to selectionSet), it's possible for
|
||||
// the same selection set to be flattened more than once, if it appears
|
||||
// in the query with different @client and/or @directive configurations.
|
||||
inheritedContext.clientOnly, inheritedContext.deferred);
|
||||
if (visitedNode.visited)
|
||||
return;
|
||||
visitedNode.visited = true;
|
||||
selectionSet.selections.forEach(function (selection) {
|
||||
if (!shouldInclude(selection, context.variables))
|
||||
return;
|
||||
var clientOnly = inheritedContext.clientOnly, deferred = inheritedContext.deferred;
|
||||
if (
|
||||
// Since the presence of @client or @defer on this field can only
|
||||
// cause clientOnly or deferred to become true, we can skip the
|
||||
// forEach loop if both clientOnly and deferred are already true.
|
||||
!(clientOnly && deferred) &&
|
||||
isNonEmptyArray(selection.directives)) {
|
||||
selection.directives.forEach(function (dir) {
|
||||
var name = dir.name.value;
|
||||
if (name === "client")
|
||||
clientOnly = true;
|
||||
if (name === "defer") {
|
||||
var args = argumentsObjectFromField(dir, context.variables);
|
||||
// The @defer directive takes an optional args.if boolean
|
||||
// argument, similar to @include(if: boolean). Note that
|
||||
// @defer(if: false) does not make context.deferred false, but
|
||||
// instead behaves as if there was no @defer directive.
|
||||
if (!args || args.if !== false) {
|
||||
deferred = true;
|
||||
}
|
||||
// TODO In the future, we may want to record args.label using
|
||||
// context.deferred, if a label is specified.
|
||||
}
|
||||
});
|
||||
}
|
||||
if (isField(selection)) {
|
||||
var existing = fieldMap.get(selection);
|
||||
if (existing) {
|
||||
// If this field has been visited along another recursive path
|
||||
// before, the final context should have clientOnly or deferred set
|
||||
// to true only if *all* paths have the directive (hence the &&).
|
||||
clientOnly = clientOnly && existing.clientOnly;
|
||||
deferred = deferred && existing.deferred;
|
||||
}
|
||||
fieldMap.set(selection, getContextFlavor(context, clientOnly, deferred));
|
||||
}
|
||||
else {
|
||||
var fragment = getFragmentFromSelection(selection, context.lookupFragment);
|
||||
if (!fragment && selection.kind === Kind.FRAGMENT_SPREAD) {
|
||||
throw newInvariantError(13, selection.name.value);
|
||||
}
|
||||
if (fragment &&
|
||||
policies.fragmentMatches(fragment, typename, result, context.variables)) {
|
||||
flatten(fragment.selectionSet, getContextFlavor(context, clientOnly, deferred));
|
||||
}
|
||||
}
|
||||
});
|
||||
})(selectionSet, context);
|
||||
return fieldMap;
|
||||
};
|
||||
StoreWriter.prototype.applyMerges = function (mergeTree, existing, incoming, context, getStorageArgs) {
|
||||
var _a;
|
||||
var _this = this;
|
||||
if (mergeTree.map.size && !isReference(incoming)) {
|
||||
var e_1 =
|
||||
// Items in the same position in different arrays are not
|
||||
// necessarily related to each other, so when incoming is an array
|
||||
// we process its elements as if there was no existing data.
|
||||
(!isArray(incoming) &&
|
||||
// Likewise, existing must be either a Reference or a StoreObject
|
||||
// in order for its fields to be safe to merge with the fields of
|
||||
// the incoming object.
|
||||
(isReference(existing) || storeValueIsStoreObject(existing))) ?
|
||||
existing
|
||||
: void 0;
|
||||
// This narrowing is implied by mergeTree.map.size > 0 and
|
||||
// !isReference(incoming), though TypeScript understandably cannot
|
||||
// hope to infer this type.
|
||||
var i_1 = incoming;
|
||||
// The options.storage objects provided to read and merge functions
|
||||
// are derived from the identity of the parent object plus a
|
||||
// sequence of storeFieldName strings/numbers identifying the nested
|
||||
// field name path of each field value to be merged.
|
||||
if (e_1 && !getStorageArgs) {
|
||||
getStorageArgs = [isReference(e_1) ? e_1.__ref : e_1];
|
||||
}
|
||||
// It's possible that applying merge functions to this subtree will
|
||||
// not change the incoming data, so this variable tracks the fields
|
||||
// that did change, so we can create a new incoming object when (and
|
||||
// only when) at least one incoming field has changed. We use a Map
|
||||
// to preserve the type of numeric keys.
|
||||
var changedFields_1;
|
||||
var getValue_1 = function (from, name) {
|
||||
return (isArray(from) ?
|
||||
typeof name === "number" ?
|
||||
from[name]
|
||||
: void 0
|
||||
: context.store.getFieldValue(from, String(name)));
|
||||
};
|
||||
mergeTree.map.forEach(function (childTree, storeFieldName) {
|
||||
var eVal = getValue_1(e_1, storeFieldName);
|
||||
var iVal = getValue_1(i_1, storeFieldName);
|
||||
// If we have no incoming data, leave any existing data untouched.
|
||||
if (void 0 === iVal)
|
||||
return;
|
||||
if (getStorageArgs) {
|
||||
getStorageArgs.push(storeFieldName);
|
||||
}
|
||||
var aVal = _this.applyMerges(childTree, eVal, iVal, context, getStorageArgs);
|
||||
if (aVal !== iVal) {
|
||||
changedFields_1 = changedFields_1 || new Map();
|
||||
changedFields_1.set(storeFieldName, aVal);
|
||||
}
|
||||
if (getStorageArgs) {
|
||||
invariant(getStorageArgs.pop() === storeFieldName);
|
||||
}
|
||||
});
|
||||
if (changedFields_1) {
|
||||
// Shallow clone i so we can add changed fields to it.
|
||||
incoming = (isArray(i_1) ? i_1.slice(0) : __assign({}, i_1));
|
||||
changedFields_1.forEach(function (value, name) {
|
||||
incoming[name] = value;
|
||||
});
|
||||
}
|
||||
}
|
||||
if (mergeTree.info) {
|
||||
return this.cache.policies.runMergeFunction(existing, incoming, mergeTree.info, context, getStorageArgs && (_a = context.store).getStorage.apply(_a, getStorageArgs));
|
||||
}
|
||||
return incoming;
|
||||
};
|
||||
return StoreWriter;
|
||||
}());
|
||||
export { StoreWriter };
|
||||
var emptyMergeTreePool = [];
|
||||
function getChildMergeTree(_a, name) {
|
||||
var map = _a.map;
|
||||
if (!map.has(name)) {
|
||||
map.set(name, emptyMergeTreePool.pop() || { map: new Map() });
|
||||
}
|
||||
return map.get(name);
|
||||
}
|
||||
function mergeMergeTrees(left, right) {
|
||||
if (left === right || !right || mergeTreeIsEmpty(right))
|
||||
return left;
|
||||
if (!left || mergeTreeIsEmpty(left))
|
||||
return right;
|
||||
var info = left.info && right.info ? __assign(__assign({}, left.info), right.info) : left.info || right.info;
|
||||
var needToMergeMaps = left.map.size && right.map.size;
|
||||
var map = needToMergeMaps ? new Map()
|
||||
: left.map.size ? left.map
|
||||
: right.map;
|
||||
var merged = { info: info, map: map };
|
||||
if (needToMergeMaps) {
|
||||
var remainingRightKeys_1 = new Set(right.map.keys());
|
||||
left.map.forEach(function (leftTree, key) {
|
||||
merged.map.set(key, mergeMergeTrees(leftTree, right.map.get(key)));
|
||||
remainingRightKeys_1.delete(key);
|
||||
});
|
||||
remainingRightKeys_1.forEach(function (key) {
|
||||
merged.map.set(key, mergeMergeTrees(right.map.get(key), left.map.get(key)));
|
||||
});
|
||||
}
|
||||
return merged;
|
||||
}
|
||||
function mergeTreeIsEmpty(tree) {
|
||||
return !tree || !(tree.info || tree.map.size);
|
||||
}
|
||||
function maybeRecycleChildMergeTree(_a, name) {
|
||||
var map = _a.map;
|
||||
var childTree = map.get(name);
|
||||
if (childTree && mergeTreeIsEmpty(childTree)) {
|
||||
emptyMergeTreePool.push(childTree);
|
||||
map.delete(name);
|
||||
}
|
||||
}
|
||||
var warnings = new Set();
|
||||
// Note that this function is unused in production, and thus should be
|
||||
// pruned by any well-configured minifier.
|
||||
function warnAboutDataLoss(existingRef, incomingObj, storeFieldName, store) {
|
||||
var getChild = function (objOrRef) {
|
||||
var child = store.getFieldValue(objOrRef, storeFieldName);
|
||||
return typeof child === "object" && child;
|
||||
};
|
||||
var existing = getChild(existingRef);
|
||||
if (!existing)
|
||||
return;
|
||||
var incoming = getChild(incomingObj);
|
||||
if (!incoming)
|
||||
return;
|
||||
// It's always safe to replace a reference, since it refers to data
|
||||
// safely stored elsewhere.
|
||||
if (isReference(existing))
|
||||
return;
|
||||
// If the values are structurally equivalent, we do not need to worry
|
||||
// about incoming replacing existing.
|
||||
if (equal(existing, incoming))
|
||||
return;
|
||||
// If we're replacing every key of the existing object, then the
|
||||
// existing data would be overwritten even if the objects were
|
||||
// normalized, so warning would not be helpful here.
|
||||
if (Object.keys(existing).every(function (key) { return store.getFieldValue(incoming, key) !== void 0; })) {
|
||||
return;
|
||||
}
|
||||
var parentType = store.getFieldValue(existingRef, "__typename") ||
|
||||
store.getFieldValue(incomingObj, "__typename");
|
||||
var fieldName = fieldNameFromStoreName(storeFieldName);
|
||||
var typeDotName = "".concat(parentType, ".").concat(fieldName);
|
||||
// Avoid warning more than once for the same type and field name.
|
||||
if (warnings.has(typeDotName))
|
||||
return;
|
||||
warnings.add(typeDotName);
|
||||
var childTypenames = [];
|
||||
// Arrays do not have __typename fields, and always need a custom merge
|
||||
// function, even if their elements are normalized entities.
|
||||
if (!isArray(existing) && !isArray(incoming)) {
|
||||
[existing, incoming].forEach(function (child) {
|
||||
var typename = store.getFieldValue(child, "__typename");
|
||||
if (typeof typename === "string" && !childTypenames.includes(typename)) {
|
||||
childTypenames.push(typename);
|
||||
}
|
||||
});
|
||||
}
|
||||
globalThis.__DEV__ !== false && invariant.warn(14, fieldName, parentType, childTypenames.length ?
|
||||
"either ensure all objects of type " +
|
||||
childTypenames.join(" and ") +
|
||||
" have an ID or a custom merge function, or "
|
||||
: "", typeDotName, existing, incoming);
|
||||
}
|
||||
//# sourceMappingURL=writeToStore.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/writeToStore.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/cache/inmemory/writeToStore.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
8
graphql-subscription/node_modules/@apollo/client/cache/package.json
generated
vendored
Normal file
8
graphql-subscription/node_modules/@apollo/client/cache/package.json
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "@apollo/client/cache",
|
||||
"type": "module",
|
||||
"main": "cache.cjs",
|
||||
"module": "index.js",
|
||||
"types": "index.d.ts",
|
||||
"sideEffects": false
|
||||
}
|
||||
3
graphql-subscription/node_modules/@apollo/client/config/jest/setup.d.ts
generated
vendored
Normal file
3
graphql-subscription/node_modules/@apollo/client/config/jest/setup.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
import "@testing-library/jest-dom";
|
||||
import "../../testing/matchers/index.js";
|
||||
//# sourceMappingURL=setup.d.ts.map
|
||||
25
graphql-subscription/node_modules/@apollo/client/config/jest/setup.js
generated
vendored
Normal file
25
graphql-subscription/node_modules/@apollo/client/config/jest/setup.js
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
import gql from "graphql-tag";
|
||||
import "@testing-library/jest-dom";
|
||||
import { loadErrorMessageHandler } from "../../dev/loadErrorMessageHandler.js";
|
||||
import "../../testing/matchers/index.js";
|
||||
// Turn off warnings for repeated fragment names
|
||||
gql.disableFragmentWarnings();
|
||||
process.on("unhandledRejection", function () { });
|
||||
loadErrorMessageHandler();
|
||||
function fail(reason) {
|
||||
if (reason === void 0) { reason = "fail was called in a test."; }
|
||||
expect(reason).toBe(undefined);
|
||||
}
|
||||
// @ts-ignore
|
||||
globalThis.fail = fail;
|
||||
if (!Symbol.dispose) {
|
||||
Object.defineProperty(Symbol, "dispose", {
|
||||
value: Symbol("dispose"),
|
||||
});
|
||||
}
|
||||
if (!Symbol.asyncDispose) {
|
||||
Object.defineProperty(Symbol, "asyncDispose", {
|
||||
value: Symbol("asyncDispose"),
|
||||
});
|
||||
}
|
||||
//# sourceMappingURL=setup.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/config/jest/setup.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/config/jest/setup.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"setup.js","sourceRoot":"","sources":["../../../src/config/jest/setup.ts"],"names":[],"mappings":"AAAA,OAAO,GAAG,MAAM,aAAa,CAAC;AAC9B,OAAO,2BAA2B,CAAC;AACnC,OAAO,EAAE,uBAAuB,EAAE,MAAM,sCAAsC,CAAC;AAC/E,OAAO,iCAAiC,CAAC;AAEzC,gDAAgD;AAChD,GAAG,CAAC,uBAAuB,EAAE,CAAC;AAE9B,OAAO,CAAC,EAAE,CAAC,oBAAoB,EAAE,cAAO,CAAC,CAAC,CAAC;AAE3C,uBAAuB,EAAE,CAAC;AAE1B,SAAS,IAAI,CAAC,MAAqC;IAArC,uBAAA,EAAA,qCAAqC;IACjD,MAAM,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;AACjC,CAAC;AAED,aAAa;AACb,UAAU,CAAC,IAAI,GAAG,IAAI,CAAC;AAEvB,IAAI,CAAC,MAAM,CAAC,OAAO,EAAE,CAAC;IACpB,MAAM,CAAC,cAAc,CAAC,MAAM,EAAE,SAAS,EAAE;QACvC,KAAK,EAAE,MAAM,CAAC,SAAS,CAAC;KACzB,CAAC,CAAC;AACL,CAAC;AACD,IAAI,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC;IACzB,MAAM,CAAC,cAAc,CAAC,MAAM,EAAE,cAAc,EAAE;QAC5C,KAAK,EAAE,MAAM,CAAC,cAAc,CAAC;KAC9B,CAAC,CAAC;AACL,CAAC","sourcesContent":["import gql from \"graphql-tag\";\nimport \"@testing-library/jest-dom\";\nimport { loadErrorMessageHandler } from \"../../dev/loadErrorMessageHandler.js\";\nimport \"../../testing/matchers/index.js\";\n\n// Turn off warnings for repeated fragment names\ngql.disableFragmentWarnings();\n\nprocess.on(\"unhandledRejection\", () => {});\n\nloadErrorMessageHandler();\n\nfunction fail(reason = \"fail was called in a test.\") {\n expect(reason).toBe(undefined);\n}\n\n// @ts-ignore\nglobalThis.fail = fail;\n\nif (!Symbol.dispose) {\n Object.defineProperty(Symbol, \"dispose\", {\n value: Symbol(\"dispose\"),\n });\n}\nif (!Symbol.asyncDispose) {\n Object.defineProperty(Symbol, \"asyncDispose\", {\n value: Symbol(\"asyncDispose\"),\n });\n}\n"]}
|
||||
430
graphql-subscription/node_modules/@apollo/client/core/ApolloClient.d.ts
generated
vendored
Normal file
430
graphql-subscription/node_modules/@apollo/client/core/ApolloClient.d.ts
generated
vendored
Normal file
@@ -0,0 +1,430 @@
|
||||
import type { ExecutionResult, DocumentNode } from "graphql";
|
||||
import type { FetchResult, GraphQLRequest } from "../link/core/index.js";
|
||||
import { ApolloLink } from "../link/core/index.js";
|
||||
import type { ApolloCache, DataProxy, Reference } from "../cache/index.js";
|
||||
import type { DocumentTransform, Observable } from "../utilities/index.js";
|
||||
import type { UriFunction } from "../link/http/index.js";
|
||||
import type { ObservableQuery } from "./ObservableQuery.js";
|
||||
import type { ApolloQueryResult, DefaultContext, OperationVariables, Resolvers, RefetchQueriesOptions, RefetchQueriesResult, RefetchQueriesInclude } from "./types.js";
|
||||
import type { QueryOptions, WatchQueryOptions, MutationOptions, SubscriptionOptions } from "./watchQueryOptions.js";
|
||||
import type { FragmentMatcher } from "./LocalState.js";
|
||||
export interface DefaultOptions {
|
||||
watchQuery?: Partial<WatchQueryOptions<any, any>>;
|
||||
query?: Partial<QueryOptions<any, any>>;
|
||||
mutate?: Partial<MutationOptions<any, any, any>>;
|
||||
}
|
||||
export interface ApolloClientOptions<TCacheShape> {
|
||||
/**
|
||||
* The URI of the GraphQL endpoint that Apollo Client will communicate with.
|
||||
*
|
||||
* One of `uri` or `link` is **required**. If you provide both, `link` takes precedence.
|
||||
*/
|
||||
uri?: string | UriFunction;
|
||||
credentials?: string;
|
||||
/**
|
||||
* An object representing headers to include in every HTTP request, such as `{Authorization: 'Bearer 1234'}`
|
||||
*
|
||||
* This value will be ignored when using the `link` option.
|
||||
*/
|
||||
headers?: Record<string, string>;
|
||||
/**
|
||||
* You can provide an {@link ApolloLink} instance to serve as Apollo Client's network layer. For more information, see [Advanced HTTP networking](https://www.apollographql.com/docs/react/networking/advanced-http-networking/).
|
||||
*
|
||||
* One of `uri` or `link` is **required**. If you provide both, `link` takes precedence.
|
||||
*/
|
||||
link?: ApolloLink;
|
||||
/**
|
||||
* The cache that Apollo Client should use to store query results locally. The recommended cache is `InMemoryCache`, which is provided by the `@apollo/client` package.
|
||||
*
|
||||
* For more information, see [Configuring the cache](https://www.apollographql.com/docs/react/caching/cache-configuration/).
|
||||
*/
|
||||
cache: ApolloCache<TCacheShape>;
|
||||
/**
|
||||
* The time interval (in milliseconds) before Apollo Client force-fetches queries after a server-side render.
|
||||
*
|
||||
* @defaultValue `0` (no delay)
|
||||
*/
|
||||
ssrForceFetchDelay?: number;
|
||||
/**
|
||||
* When using Apollo Client for [server-side rendering](https://www.apollographql.com/docs/react/performance/server-side-rendering/), set this to `true` so that the [`getDataFromTree` function](../react/ssr/#getdatafromtree) can work effectively.
|
||||
*
|
||||
* @defaultValue `false`
|
||||
*/
|
||||
ssrMode?: boolean;
|
||||
/**
|
||||
* If `true`, the [Apollo Client Devtools](https://www.apollographql.com/docs/react/development-testing/developer-tooling/#apollo-client-devtools) browser extension can connect to Apollo Client.
|
||||
*
|
||||
* The default value is `false` in production and `true` in development (if there is a `window` object).
|
||||
*/
|
||||
connectToDevTools?: boolean;
|
||||
/**
|
||||
* If `false`, Apollo Client sends every created query to the server, even if a _completely_ identical query (identical in terms of query string, variable values, and operationName) is already in flight.
|
||||
*
|
||||
* @defaultValue `true`
|
||||
*/
|
||||
queryDeduplication?: boolean;
|
||||
/**
|
||||
* Provide this object to set application-wide default values for options you can provide to the `watchQuery`, `query`, and `mutate` functions. See below for an example object.
|
||||
*
|
||||
* See this [example object](https://www.apollographql.com/docs/react/api/core/ApolloClient#example-defaultoptions-object).
|
||||
*/
|
||||
defaultOptions?: DefaultOptions;
|
||||
defaultContext?: Partial<DefaultContext>;
|
||||
/**
|
||||
* If `true`, Apollo Client will assume results read from the cache are never mutated by application code, which enables substantial performance optimizations.
|
||||
*
|
||||
* @defaultValue `false`
|
||||
*/
|
||||
assumeImmutableResults?: boolean;
|
||||
resolvers?: Resolvers | Resolvers[];
|
||||
typeDefs?: string | string[] | DocumentNode | DocumentNode[];
|
||||
fragmentMatcher?: FragmentMatcher;
|
||||
/**
|
||||
* A custom name (e.g., `iOS`) that identifies this particular client among your set of clients. Apollo Server and Apollo Studio use this property as part of the [client awareness](https://www.apollographql.com/docs/apollo-server/monitoring/metrics#identifying-distinct-clients) feature.
|
||||
*/
|
||||
name?: string;
|
||||
/**
|
||||
* A custom version that identifies the current version of this particular client (e.g., `1.2`). Apollo Server and Apollo Studio use this property as part of the [client awareness](https://www.apollographql.com/docs/apollo-server/monitoring/metrics#identifying-distinct-clients) feature.
|
||||
*
|
||||
* This is **not** the version of Apollo Client that you are using, but rather any version string that helps you differentiate between versions of your client.
|
||||
*/
|
||||
version?: string;
|
||||
documentTransform?: DocumentTransform;
|
||||
}
|
||||
import { mergeOptions } from "../utilities/index.js";
|
||||
import { getApolloClientMemoryInternals } from "../utilities/caching/getMemoryInternals.js";
|
||||
export { mergeOptions };
|
||||
/**
|
||||
* This is the primary Apollo Client class. It is used to send GraphQL documents (i.e. queries
|
||||
* and mutations) to a GraphQL spec-compliant server over an {@link ApolloLink} instance,
|
||||
* receive results from the server and cache the results in a store. It also delivers updates
|
||||
* to GraphQL queries through {@link Observable} instances.
|
||||
*/
|
||||
export declare class ApolloClient<TCacheShape> implements DataProxy {
|
||||
link: ApolloLink;
|
||||
cache: ApolloCache<TCacheShape>;
|
||||
disableNetworkFetches: boolean;
|
||||
version: string;
|
||||
queryDeduplication: boolean;
|
||||
defaultOptions: DefaultOptions;
|
||||
readonly typeDefs: ApolloClientOptions<TCacheShape>["typeDefs"];
|
||||
private queryManager;
|
||||
private devToolsHookCb?;
|
||||
private resetStoreCallbacks;
|
||||
private clearStoreCallbacks;
|
||||
private localState;
|
||||
/**
|
||||
* Constructs an instance of {@link ApolloClient}.
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* import { ApolloClient, InMemoryCache } from '@apollo/client';
|
||||
*
|
||||
* const cache = new InMemoryCache();
|
||||
*
|
||||
* const client = new ApolloClient({
|
||||
* // Provide required constructor fields
|
||||
* cache: cache,
|
||||
* uri: 'http://localhost:4000/',
|
||||
*
|
||||
* // Provide some optional constructor fields
|
||||
* name: 'react-web-client',
|
||||
* version: '1.3',
|
||||
* queryDeduplication: false,
|
||||
* defaultOptions: {
|
||||
* watchQuery: {
|
||||
* fetchPolicy: 'cache-and-network',
|
||||
* },
|
||||
* },
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
constructor(options: ApolloClientOptions<TCacheShape>);
|
||||
private connectToDevTools;
|
||||
/**
|
||||
* The `DocumentTransform` used to modify GraphQL documents before a request
|
||||
* is made. If a custom `DocumentTransform` is not provided, this will be the
|
||||
* default document transform.
|
||||
*/
|
||||
get documentTransform(): DocumentTransform;
|
||||
/**
|
||||
* Call this method to terminate any active client processes, making it safe
|
||||
* to dispose of this `ApolloClient` instance.
|
||||
*/
|
||||
stop(): void;
|
||||
/**
|
||||
* This watches the cache store of the query according to the options specified and
|
||||
* returns an {@link ObservableQuery}. We can subscribe to this {@link ObservableQuery} and
|
||||
* receive updated results through a GraphQL observer when the cache store changes.
|
||||
*
|
||||
* Note that this method is not an implementation of GraphQL subscriptions. Rather,
|
||||
* it uses Apollo's store in order to reactively deliver updates to your query results.
|
||||
*
|
||||
* For example, suppose you call watchQuery on a GraphQL query that fetches a person's
|
||||
* first and last name and this person has a particular object identifier, provided by
|
||||
* dataIdFromObject. Later, a different query fetches that same person's
|
||||
* first and last name and the first name has now changed. Then, any observers associated
|
||||
* with the results of the first query will be updated with a new result object.
|
||||
*
|
||||
* Note that if the cache does not change, the subscriber will *not* be notified.
|
||||
*
|
||||
* See [here](https://medium.com/apollo-stack/the-concepts-of-graphql-bc68bd819be3#.3mb0cbcmc) for
|
||||
* a description of store reactivity.
|
||||
*/
|
||||
watchQuery<T = any, TVariables extends OperationVariables = OperationVariables>(options: WatchQueryOptions<TVariables, T>): ObservableQuery<T, TVariables>;
|
||||
/**
|
||||
* This resolves a single query according to the options specified and
|
||||
* returns a `Promise` which is either resolved with the resulting data
|
||||
* or rejected with an error.
|
||||
*
|
||||
* @param options - An object of type {@link QueryOptions} that allows us to
|
||||
* describe how this query should be treated e.g. whether it should hit the
|
||||
* server at all or just resolve from the cache, etc.
|
||||
*/
|
||||
query<T = any, TVariables extends OperationVariables = OperationVariables>(options: QueryOptions<TVariables, T>): Promise<ApolloQueryResult<T>>;
|
||||
/**
|
||||
* This resolves a single mutation according to the options specified and returns a
|
||||
* Promise which is either resolved with the resulting data or rejected with an
|
||||
* error. In some cases both `data` and `errors` might be undefined, for example
|
||||
* when `errorPolicy` is set to `'ignore'`.
|
||||
*
|
||||
* It takes options as an object with the following keys and values:
|
||||
*/
|
||||
mutate<TData = any, TVariables extends OperationVariables = OperationVariables, TContext extends Record<string, any> = DefaultContext, TCache extends ApolloCache<any> = ApolloCache<any>>(options: MutationOptions<TData, TVariables, TContext>): Promise<FetchResult<TData>>;
|
||||
/**
|
||||
* This subscribes to a graphql subscription according to the options specified and returns an
|
||||
* {@link Observable} which either emits received data or an error.
|
||||
*/
|
||||
subscribe<T = any, TVariables extends OperationVariables = OperationVariables>(options: SubscriptionOptions<TVariables, T>): Observable<FetchResult<T>>;
|
||||
/**
|
||||
* Tries to read some data from the store in the shape of the provided
|
||||
* GraphQL query without making a network request. This method will start at
|
||||
* the root query. To start at a specific id returned by `dataIdFromObject`
|
||||
* use `readFragment`.
|
||||
*
|
||||
* @param optimistic - Set to `true` to allow `readQuery` to return
|
||||
* optimistic results. Is `false` by default.
|
||||
*/
|
||||
readQuery<T = any, TVariables = OperationVariables>(options: DataProxy.Query<TVariables, T>, optimistic?: boolean): T | null;
|
||||
/**
|
||||
* Tries to read some data from the store in the shape of the provided
|
||||
* GraphQL fragment without making a network request. This method will read a
|
||||
* GraphQL fragment from any arbitrary id that is currently cached, unlike
|
||||
* `readQuery` which will only read from the root query.
|
||||
*
|
||||
* You must pass in a GraphQL document with a single fragment or a document
|
||||
* with multiple fragments that represent what you are reading. If you pass
|
||||
* in a document with multiple fragments then you must also specify a
|
||||
* `fragmentName`.
|
||||
*
|
||||
* @param optimistic - Set to `true` to allow `readFragment` to return
|
||||
* optimistic results. Is `false` by default.
|
||||
*/
|
||||
readFragment<T = any, TVariables = OperationVariables>(options: DataProxy.Fragment<TVariables, T>, optimistic?: boolean): T | null;
|
||||
/**
|
||||
* Writes some data in the shape of the provided GraphQL query directly to
|
||||
* the store. This method will start at the root query. To start at a
|
||||
* specific id returned by `dataIdFromObject` then use `writeFragment`.
|
||||
*/
|
||||
writeQuery<TData = any, TVariables = OperationVariables>(options: DataProxy.WriteQueryOptions<TData, TVariables>): Reference | undefined;
|
||||
/**
|
||||
* Writes some data in the shape of the provided GraphQL fragment directly to
|
||||
* the store. This method will write to a GraphQL fragment from any arbitrary
|
||||
* id that is currently cached, unlike `writeQuery` which will only write
|
||||
* from the root query.
|
||||
*
|
||||
* You must pass in a GraphQL document with a single fragment or a document
|
||||
* with multiple fragments that represent what you are writing. If you pass
|
||||
* in a document with multiple fragments then you must also specify a
|
||||
* `fragmentName`.
|
||||
*/
|
||||
writeFragment<TData = any, TVariables = OperationVariables>(options: DataProxy.WriteFragmentOptions<TData, TVariables>): Reference | undefined;
|
||||
__actionHookForDevTools(cb: () => any): void;
|
||||
__requestRaw(payload: GraphQLRequest): Observable<ExecutionResult>;
|
||||
/**
|
||||
* Resets your entire store by clearing out your cache and then re-executing
|
||||
* all of your active queries. This makes it so that you may guarantee that
|
||||
* there is no data left in your store from a time before you called this
|
||||
* method.
|
||||
*
|
||||
* `resetStore()` is useful when your user just logged out. You’ve removed the
|
||||
* user session, and you now want to make sure that any references to data you
|
||||
* might have fetched while the user session was active is gone.
|
||||
*
|
||||
* It is important to remember that `resetStore()` *will* refetch any active
|
||||
* queries. This means that any components that might be mounted will execute
|
||||
* their queries again using your network interface. If you do not want to
|
||||
* re-execute any queries then you should make sure to stop watching any
|
||||
* active queries.
|
||||
*/
|
||||
resetStore(): Promise<ApolloQueryResult<any>[] | null>;
|
||||
/**
|
||||
* Remove all data from the store. Unlike `resetStore`, `clearStore` will
|
||||
* not refetch any active queries.
|
||||
*/
|
||||
clearStore(): Promise<any[]>;
|
||||
/**
|
||||
* Allows callbacks to be registered that are executed when the store is
|
||||
* reset. `onResetStore` returns an unsubscribe function that can be used
|
||||
* to remove registered callbacks.
|
||||
*/
|
||||
onResetStore(cb: () => Promise<any>): () => void;
|
||||
/**
|
||||
* Allows callbacks to be registered that are executed when the store is
|
||||
* cleared. `onClearStore` returns an unsubscribe function that can be used
|
||||
* to remove registered callbacks.
|
||||
*/
|
||||
onClearStore(cb: () => Promise<any>): () => void;
|
||||
/**
|
||||
* Refetches all of your active queries.
|
||||
*
|
||||
* `reFetchObservableQueries()` is useful if you want to bring the client back to proper state in case of a network outage
|
||||
*
|
||||
* It is important to remember that `reFetchObservableQueries()` *will* refetch any active
|
||||
* queries. This means that any components that might be mounted will execute
|
||||
* their queries again using your network interface. If you do not want to
|
||||
* re-execute any queries then you should make sure to stop watching any
|
||||
* active queries.
|
||||
* Takes optional parameter `includeStandby` which will include queries in standby-mode when refetching.
|
||||
*/
|
||||
reFetchObservableQueries(includeStandby?: boolean): Promise<ApolloQueryResult<any>[]>;
|
||||
/**
|
||||
* Refetches specified active queries. Similar to "reFetchObservableQueries()" but with a specific list of queries.
|
||||
*
|
||||
* `refetchQueries()` is useful for use cases to imperatively refresh a selection of queries.
|
||||
*
|
||||
* It is important to remember that `refetchQueries()` *will* refetch specified active
|
||||
* queries. This means that any components that might be mounted will execute
|
||||
* their queries again using your network interface. If you do not want to
|
||||
* re-execute any queries then you should make sure to stop watching any
|
||||
* active queries.
|
||||
*/
|
||||
refetchQueries<TCache extends ApolloCache<any> = ApolloCache<TCacheShape>, TResult = Promise<ApolloQueryResult<any>>>(options: RefetchQueriesOptions<TCache, TResult>): RefetchQueriesResult<TResult>;
|
||||
/**
|
||||
* Get all currently active `ObservableQuery` objects, in a `Map` keyed by
|
||||
* query ID strings.
|
||||
*
|
||||
* An "active" query is one that has observers and a `fetchPolicy` other than
|
||||
* "standby" or "cache-only".
|
||||
*
|
||||
* You can include all `ObservableQuery` objects (including the inactive ones)
|
||||
* by passing "all" instead of "active", or you can include just a subset of
|
||||
* active queries by passing an array of query names or DocumentNode objects.
|
||||
*/
|
||||
getObservableQueries(include?: RefetchQueriesInclude): Map<string, ObservableQuery<any>>;
|
||||
/**
|
||||
* Exposes the cache's complete state, in a serializable format for later restoration.
|
||||
*/
|
||||
extract(optimistic?: boolean): TCacheShape;
|
||||
/**
|
||||
* Replaces existing state in the cache (if any) with the values expressed by
|
||||
* `serializedState`.
|
||||
*
|
||||
* Called when hydrating a cache (server side rendering, or offline storage),
|
||||
* and also (potentially) during hot reloads.
|
||||
*/
|
||||
restore(serializedState: TCacheShape): ApolloCache<TCacheShape>;
|
||||
/**
|
||||
* Add additional local resolvers.
|
||||
*/
|
||||
addResolvers(resolvers: Resolvers | Resolvers[]): void;
|
||||
/**
|
||||
* Set (override existing) local resolvers.
|
||||
*/
|
||||
setResolvers(resolvers: Resolvers | Resolvers[]): void;
|
||||
/**
|
||||
* Get all registered local resolvers.
|
||||
*/
|
||||
getResolvers(): Resolvers;
|
||||
/**
|
||||
* Set a custom local state fragment matcher.
|
||||
*/
|
||||
setLocalStateFragmentMatcher(fragmentMatcher: FragmentMatcher): void;
|
||||
/**
|
||||
* Define a new ApolloLink (or link chain) that Apollo Client will use.
|
||||
*/
|
||||
setLink(newLink: ApolloLink): void;
|
||||
get defaultContext(): Partial<DefaultContext>;
|
||||
/**
|
||||
* @experimental
|
||||
* This is not a stable API - it is used in development builds to expose
|
||||
* information to the DevTools.
|
||||
* Use at your own risk!
|
||||
* For more details, see [Memory Management](https://www.apollographql.com/docs/react/caching/memory-management/#measuring-cache-usage)
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* console.log(client.getMemoryInternals())
|
||||
* ```
|
||||
* Logs output in the following JSON format:
|
||||
* @example
|
||||
* ```json
|
||||
*{
|
||||
* limits: {
|
||||
* parser: 1000,
|
||||
* canonicalStringify: 1000,
|
||||
* print: 2000,
|
||||
* 'documentTransform.cache': 2000,
|
||||
* 'queryManager.getDocumentInfo': 2000,
|
||||
* 'PersistedQueryLink.persistedQueryHashes': 2000,
|
||||
* 'fragmentRegistry.transform': 2000,
|
||||
* 'fragmentRegistry.lookup': 1000,
|
||||
* 'fragmentRegistry.findFragmentSpreads': 4000,
|
||||
* 'cache.fragmentQueryDocuments': 1000,
|
||||
* 'removeTypenameFromVariables.getVariableDefinitions': 2000,
|
||||
* 'inMemoryCache.maybeBroadcastWatch': 5000,
|
||||
* 'inMemoryCache.executeSelectionSet': 10000,
|
||||
* 'inMemoryCache.executeSubSelectedArray': 5000
|
||||
* },
|
||||
* sizes: {
|
||||
* parser: 26,
|
||||
* canonicalStringify: 4,
|
||||
* print: 14,
|
||||
* addTypenameDocumentTransform: [
|
||||
* {
|
||||
* cache: 14,
|
||||
* },
|
||||
* ],
|
||||
* queryManager: {
|
||||
* getDocumentInfo: 14,
|
||||
* documentTransforms: [
|
||||
* {
|
||||
* cache: 14,
|
||||
* },
|
||||
* {
|
||||
* cache: 14,
|
||||
* },
|
||||
* ],
|
||||
* },
|
||||
* fragmentRegistry: {
|
||||
* findFragmentSpreads: 34,
|
||||
* lookup: 20,
|
||||
* transform: 14,
|
||||
* },
|
||||
* cache: {
|
||||
* fragmentQueryDocuments: 22,
|
||||
* },
|
||||
* inMemoryCache: {
|
||||
* executeSelectionSet: 4345,
|
||||
* executeSubSelectedArray: 1206,
|
||||
* maybeBroadcastWatch: 32,
|
||||
* },
|
||||
* links: [
|
||||
* {
|
||||
* PersistedQueryLink: {
|
||||
* persistedQueryHashes: 14,
|
||||
* },
|
||||
* },
|
||||
* {
|
||||
* removeTypenameFromVariables: {
|
||||
* getVariableDefinitions: 14,
|
||||
* },
|
||||
* },
|
||||
* ],
|
||||
* },
|
||||
* }
|
||||
*```
|
||||
*/
|
||||
getMemoryInternals?: typeof getApolloClientMemoryInternals;
|
||||
}
|
||||
//# sourceMappingURL=ApolloClient.d.ts.map
|
||||
508
graphql-subscription/node_modules/@apollo/client/core/ApolloClient.js
generated
vendored
Normal file
508
graphql-subscription/node_modules/@apollo/client/core/ApolloClient.js
generated
vendored
Normal file
@@ -0,0 +1,508 @@
|
||||
import { __assign } from "tslib";
|
||||
import { invariant, newInvariantError } from "../utilities/globals/index.js";
|
||||
import { ApolloLink, execute } from "../link/core/index.js";
|
||||
import { version } from "../version.js";
|
||||
import { HttpLink } from "../link/http/index.js";
|
||||
import { QueryManager } from "./QueryManager.js";
|
||||
import { LocalState } from "./LocalState.js";
|
||||
var hasSuggestedDevtools = false;
|
||||
// Though mergeOptions now resides in @apollo/client/utilities, it was
|
||||
// previously declared and exported from this module, and then reexported from
|
||||
// @apollo/client/core. Since we need to preserve that API anyway, the easiest
|
||||
// solution is to reexport mergeOptions where it was previously declared (here).
|
||||
import { mergeOptions } from "../utilities/index.js";
|
||||
import { getApolloClientMemoryInternals } from "../utilities/caching/getMemoryInternals.js";
|
||||
export { mergeOptions };
|
||||
/**
|
||||
* This is the primary Apollo Client class. It is used to send GraphQL documents (i.e. queries
|
||||
* and mutations) to a GraphQL spec-compliant server over an {@link ApolloLink} instance,
|
||||
* receive results from the server and cache the results in a store. It also delivers updates
|
||||
* to GraphQL queries through {@link Observable} instances.
|
||||
*/
|
||||
var ApolloClient = /** @class */ (function () {
|
||||
/**
|
||||
* Constructs an instance of {@link ApolloClient}.
|
||||
*
|
||||
* @example
|
||||
* ```js
|
||||
* import { ApolloClient, InMemoryCache } from '@apollo/client';
|
||||
*
|
||||
* const cache = new InMemoryCache();
|
||||
*
|
||||
* const client = new ApolloClient({
|
||||
* // Provide required constructor fields
|
||||
* cache: cache,
|
||||
* uri: 'http://localhost:4000/',
|
||||
*
|
||||
* // Provide some optional constructor fields
|
||||
* name: 'react-web-client',
|
||||
* version: '1.3',
|
||||
* queryDeduplication: false,
|
||||
* defaultOptions: {
|
||||
* watchQuery: {
|
||||
* fetchPolicy: 'cache-and-network',
|
||||
* },
|
||||
* },
|
||||
* });
|
||||
* ```
|
||||
*/
|
||||
function ApolloClient(options) {
|
||||
var _this = this;
|
||||
this.resetStoreCallbacks = [];
|
||||
this.clearStoreCallbacks = [];
|
||||
if (!options.cache) {
|
||||
throw newInvariantError(15);
|
||||
}
|
||||
var uri = options.uri, credentials = options.credentials, headers = options.headers, cache = options.cache, documentTransform = options.documentTransform, _a = options.ssrMode, ssrMode = _a === void 0 ? false : _a, _b = options.ssrForceFetchDelay, ssrForceFetchDelay = _b === void 0 ? 0 : _b,
|
||||
// Expose the client instance as window.__APOLLO_CLIENT__ and call
|
||||
// onBroadcast in queryManager.broadcastQueries to enable browser
|
||||
// devtools, but disable them by default in production.
|
||||
_c = options.connectToDevTools,
|
||||
// Expose the client instance as window.__APOLLO_CLIENT__ and call
|
||||
// onBroadcast in queryManager.broadcastQueries to enable browser
|
||||
// devtools, but disable them by default in production.
|
||||
connectToDevTools = _c === void 0 ? typeof window === "object" &&
|
||||
!window.__APOLLO_CLIENT__ &&
|
||||
globalThis.__DEV__ !== false : _c, _d = options.queryDeduplication, queryDeduplication = _d === void 0 ? true : _d, defaultOptions = options.defaultOptions, defaultContext = options.defaultContext, _e = options.assumeImmutableResults, assumeImmutableResults = _e === void 0 ? cache.assumeImmutableResults : _e, resolvers = options.resolvers, typeDefs = options.typeDefs, fragmentMatcher = options.fragmentMatcher, clientAwarenessName = options.name, clientAwarenessVersion = options.version;
|
||||
var link = options.link;
|
||||
if (!link) {
|
||||
link =
|
||||
uri ? new HttpLink({ uri: uri, credentials: credentials, headers: headers }) : ApolloLink.empty();
|
||||
}
|
||||
this.link = link;
|
||||
this.cache = cache;
|
||||
this.disableNetworkFetches = ssrMode || ssrForceFetchDelay > 0;
|
||||
this.queryDeduplication = queryDeduplication;
|
||||
this.defaultOptions = defaultOptions || Object.create(null);
|
||||
this.typeDefs = typeDefs;
|
||||
if (ssrForceFetchDelay) {
|
||||
setTimeout(function () { return (_this.disableNetworkFetches = false); }, ssrForceFetchDelay);
|
||||
}
|
||||
this.watchQuery = this.watchQuery.bind(this);
|
||||
this.query = this.query.bind(this);
|
||||
this.mutate = this.mutate.bind(this);
|
||||
this.resetStore = this.resetStore.bind(this);
|
||||
this.reFetchObservableQueries = this.reFetchObservableQueries.bind(this);
|
||||
this.version = version;
|
||||
this.localState = new LocalState({
|
||||
cache: cache,
|
||||
client: this,
|
||||
resolvers: resolvers,
|
||||
fragmentMatcher: fragmentMatcher,
|
||||
});
|
||||
this.queryManager = new QueryManager({
|
||||
cache: this.cache,
|
||||
link: this.link,
|
||||
defaultOptions: this.defaultOptions,
|
||||
defaultContext: defaultContext,
|
||||
documentTransform: documentTransform,
|
||||
queryDeduplication: queryDeduplication,
|
||||
ssrMode: ssrMode,
|
||||
clientAwareness: {
|
||||
name: clientAwarenessName,
|
||||
version: clientAwarenessVersion,
|
||||
},
|
||||
localState: this.localState,
|
||||
assumeImmutableResults: assumeImmutableResults,
|
||||
onBroadcast: connectToDevTools ?
|
||||
function () {
|
||||
if (_this.devToolsHookCb) {
|
||||
_this.devToolsHookCb({
|
||||
action: {},
|
||||
state: {
|
||||
queries: _this.queryManager.getQueryStore(),
|
||||
mutations: _this.queryManager.mutationStore || {},
|
||||
},
|
||||
dataWithOptimisticResults: _this.cache.extract(true),
|
||||
});
|
||||
}
|
||||
}
|
||||
: void 0,
|
||||
});
|
||||
if (connectToDevTools)
|
||||
this.connectToDevTools();
|
||||
}
|
||||
ApolloClient.prototype.connectToDevTools = function () {
|
||||
if (typeof window === "object") {
|
||||
var windowWithDevTools = window;
|
||||
var devtoolsSymbol = Symbol.for("apollo.devtools");
|
||||
(windowWithDevTools[devtoolsSymbol] =
|
||||
windowWithDevTools[devtoolsSymbol] || []).push(this);
|
||||
windowWithDevTools.__APOLLO_CLIENT__ = this;
|
||||
}
|
||||
/**
|
||||
* Suggest installing the devtools for developers who don't have them
|
||||
*/
|
||||
if (!hasSuggestedDevtools && globalThis.__DEV__ !== false) {
|
||||
hasSuggestedDevtools = true;
|
||||
setTimeout(function () {
|
||||
if (typeof window !== "undefined" &&
|
||||
window.document &&
|
||||
window.top === window.self &&
|
||||
!window.__APOLLO_DEVTOOLS_GLOBAL_HOOK__) {
|
||||
var nav = window.navigator;
|
||||
var ua = nav && nav.userAgent;
|
||||
var url = void 0;
|
||||
if (typeof ua === "string") {
|
||||
if (ua.indexOf("Chrome/") > -1) {
|
||||
url =
|
||||
"https://chrome.google.com/webstore/detail/" +
|
||||
"apollo-client-developer-t/jdkknkkbebbapilgoeccciglkfbmbnfm";
|
||||
}
|
||||
else if (ua.indexOf("Firefox/") > -1) {
|
||||
url =
|
||||
"https://addons.mozilla.org/en-US/firefox/addon/apollo-developer-tools/";
|
||||
}
|
||||
}
|
||||
if (url) {
|
||||
globalThis.__DEV__ !== false && invariant.log("Download the Apollo DevTools for a better development " +
|
||||
"experience: %s", url);
|
||||
}
|
||||
}
|
||||
}, 10000);
|
||||
}
|
||||
};
|
||||
Object.defineProperty(ApolloClient.prototype, "documentTransform", {
|
||||
/**
|
||||
* The `DocumentTransform` used to modify GraphQL documents before a request
|
||||
* is made. If a custom `DocumentTransform` is not provided, this will be the
|
||||
* default document transform.
|
||||
*/
|
||||
get: function () {
|
||||
return this.queryManager.documentTransform;
|
||||
},
|
||||
enumerable: false,
|
||||
configurable: true
|
||||
});
|
||||
/**
|
||||
* Call this method to terminate any active client processes, making it safe
|
||||
* to dispose of this `ApolloClient` instance.
|
||||
*/
|
||||
ApolloClient.prototype.stop = function () {
|
||||
this.queryManager.stop();
|
||||
};
|
||||
/**
|
||||
* This watches the cache store of the query according to the options specified and
|
||||
* returns an {@link ObservableQuery}. We can subscribe to this {@link ObservableQuery} and
|
||||
* receive updated results through a GraphQL observer when the cache store changes.
|
||||
*
|
||||
* Note that this method is not an implementation of GraphQL subscriptions. Rather,
|
||||
* it uses Apollo's store in order to reactively deliver updates to your query results.
|
||||
*
|
||||
* For example, suppose you call watchQuery on a GraphQL query that fetches a person's
|
||||
* first and last name and this person has a particular object identifier, provided by
|
||||
* dataIdFromObject. Later, a different query fetches that same person's
|
||||
* first and last name and the first name has now changed. Then, any observers associated
|
||||
* with the results of the first query will be updated with a new result object.
|
||||
*
|
||||
* Note that if the cache does not change, the subscriber will *not* be notified.
|
||||
*
|
||||
* See [here](https://medium.com/apollo-stack/the-concepts-of-graphql-bc68bd819be3#.3mb0cbcmc) for
|
||||
* a description of store reactivity.
|
||||
*/
|
||||
ApolloClient.prototype.watchQuery = function (options) {
|
||||
if (this.defaultOptions.watchQuery) {
|
||||
options = mergeOptions(this.defaultOptions.watchQuery, options);
|
||||
}
|
||||
// XXX Overwriting options is probably not the best way to do this long term...
|
||||
if (this.disableNetworkFetches &&
|
||||
(options.fetchPolicy === "network-only" ||
|
||||
options.fetchPolicy === "cache-and-network")) {
|
||||
options = __assign(__assign({}, options), { fetchPolicy: "cache-first" });
|
||||
}
|
||||
return this.queryManager.watchQuery(options);
|
||||
};
|
||||
/**
|
||||
* This resolves a single query according to the options specified and
|
||||
* returns a `Promise` which is either resolved with the resulting data
|
||||
* or rejected with an error.
|
||||
*
|
||||
* @param options - An object of type {@link QueryOptions} that allows us to
|
||||
* describe how this query should be treated e.g. whether it should hit the
|
||||
* server at all or just resolve from the cache, etc.
|
||||
*/
|
||||
ApolloClient.prototype.query = function (options) {
|
||||
if (this.defaultOptions.query) {
|
||||
options = mergeOptions(this.defaultOptions.query, options);
|
||||
}
|
||||
invariant(options.fetchPolicy !== "cache-and-network", 16);
|
||||
if (this.disableNetworkFetches && options.fetchPolicy === "network-only") {
|
||||
options = __assign(__assign({}, options), { fetchPolicy: "cache-first" });
|
||||
}
|
||||
return this.queryManager.query(options);
|
||||
};
|
||||
/**
|
||||
* This resolves a single mutation according to the options specified and returns a
|
||||
* Promise which is either resolved with the resulting data or rejected with an
|
||||
* error. In some cases both `data` and `errors` might be undefined, for example
|
||||
* when `errorPolicy` is set to `'ignore'`.
|
||||
*
|
||||
* It takes options as an object with the following keys and values:
|
||||
*/
|
||||
ApolloClient.prototype.mutate = function (options) {
|
||||
if (this.defaultOptions.mutate) {
|
||||
options = mergeOptions(this.defaultOptions.mutate, options);
|
||||
}
|
||||
return this.queryManager.mutate(options);
|
||||
};
|
||||
/**
|
||||
* This subscribes to a graphql subscription according to the options specified and returns an
|
||||
* {@link Observable} which either emits received data or an error.
|
||||
*/
|
||||
ApolloClient.prototype.subscribe = function (options) {
|
||||
return this.queryManager.startGraphQLSubscription(options);
|
||||
};
|
||||
/**
|
||||
* Tries to read some data from the store in the shape of the provided
|
||||
* GraphQL query without making a network request. This method will start at
|
||||
* the root query. To start at a specific id returned by `dataIdFromObject`
|
||||
* use `readFragment`.
|
||||
*
|
||||
* @param optimistic - Set to `true` to allow `readQuery` to return
|
||||
* optimistic results. Is `false` by default.
|
||||
*/
|
||||
ApolloClient.prototype.readQuery = function (options, optimistic) {
|
||||
if (optimistic === void 0) { optimistic = false; }
|
||||
return this.cache.readQuery(options, optimistic);
|
||||
};
|
||||
/**
|
||||
* Tries to read some data from the store in the shape of the provided
|
||||
* GraphQL fragment without making a network request. This method will read a
|
||||
* GraphQL fragment from any arbitrary id that is currently cached, unlike
|
||||
* `readQuery` which will only read from the root query.
|
||||
*
|
||||
* You must pass in a GraphQL document with a single fragment or a document
|
||||
* with multiple fragments that represent what you are reading. If you pass
|
||||
* in a document with multiple fragments then you must also specify a
|
||||
* `fragmentName`.
|
||||
*
|
||||
* @param optimistic - Set to `true` to allow `readFragment` to return
|
||||
* optimistic results. Is `false` by default.
|
||||
*/
|
||||
ApolloClient.prototype.readFragment = function (options, optimistic) {
|
||||
if (optimistic === void 0) { optimistic = false; }
|
||||
return this.cache.readFragment(options, optimistic);
|
||||
};
|
||||
/**
|
||||
* Writes some data in the shape of the provided GraphQL query directly to
|
||||
* the store. This method will start at the root query. To start at a
|
||||
* specific id returned by `dataIdFromObject` then use `writeFragment`.
|
||||
*/
|
||||
ApolloClient.prototype.writeQuery = function (options) {
|
||||
var ref = this.cache.writeQuery(options);
|
||||
if (options.broadcast !== false) {
|
||||
this.queryManager.broadcastQueries();
|
||||
}
|
||||
return ref;
|
||||
};
|
||||
/**
|
||||
* Writes some data in the shape of the provided GraphQL fragment directly to
|
||||
* the store. This method will write to a GraphQL fragment from any arbitrary
|
||||
* id that is currently cached, unlike `writeQuery` which will only write
|
||||
* from the root query.
|
||||
*
|
||||
* You must pass in a GraphQL document with a single fragment or a document
|
||||
* with multiple fragments that represent what you are writing. If you pass
|
||||
* in a document with multiple fragments then you must also specify a
|
||||
* `fragmentName`.
|
||||
*/
|
||||
ApolloClient.prototype.writeFragment = function (options) {
|
||||
var ref = this.cache.writeFragment(options);
|
||||
if (options.broadcast !== false) {
|
||||
this.queryManager.broadcastQueries();
|
||||
}
|
||||
return ref;
|
||||
};
|
||||
ApolloClient.prototype.__actionHookForDevTools = function (cb) {
|
||||
this.devToolsHookCb = cb;
|
||||
};
|
||||
ApolloClient.prototype.__requestRaw = function (payload) {
|
||||
return execute(this.link, payload);
|
||||
};
|
||||
/**
|
||||
* Resets your entire store by clearing out your cache and then re-executing
|
||||
* all of your active queries. This makes it so that you may guarantee that
|
||||
* there is no data left in your store from a time before you called this
|
||||
* method.
|
||||
*
|
||||
* `resetStore()` is useful when your user just logged out. You’ve removed the
|
||||
* user session, and you now want to make sure that any references to data you
|
||||
* might have fetched while the user session was active is gone.
|
||||
*
|
||||
* It is important to remember that `resetStore()` *will* refetch any active
|
||||
* queries. This means that any components that might be mounted will execute
|
||||
* their queries again using your network interface. If you do not want to
|
||||
* re-execute any queries then you should make sure to stop watching any
|
||||
* active queries.
|
||||
*/
|
||||
ApolloClient.prototype.resetStore = function () {
|
||||
var _this = this;
|
||||
return Promise.resolve()
|
||||
.then(function () {
|
||||
return _this.queryManager.clearStore({
|
||||
discardWatches: false,
|
||||
});
|
||||
})
|
||||
.then(function () { return Promise.all(_this.resetStoreCallbacks.map(function (fn) { return fn(); })); })
|
||||
.then(function () { return _this.reFetchObservableQueries(); });
|
||||
};
|
||||
/**
|
||||
* Remove all data from the store. Unlike `resetStore`, `clearStore` will
|
||||
* not refetch any active queries.
|
||||
*/
|
||||
ApolloClient.prototype.clearStore = function () {
|
||||
var _this = this;
|
||||
return Promise.resolve()
|
||||
.then(function () {
|
||||
return _this.queryManager.clearStore({
|
||||
discardWatches: true,
|
||||
});
|
||||
})
|
||||
.then(function () { return Promise.all(_this.clearStoreCallbacks.map(function (fn) { return fn(); })); });
|
||||
};
|
||||
/**
|
||||
* Allows callbacks to be registered that are executed when the store is
|
||||
* reset. `onResetStore` returns an unsubscribe function that can be used
|
||||
* to remove registered callbacks.
|
||||
*/
|
||||
ApolloClient.prototype.onResetStore = function (cb) {
|
||||
var _this = this;
|
||||
this.resetStoreCallbacks.push(cb);
|
||||
return function () {
|
||||
_this.resetStoreCallbacks = _this.resetStoreCallbacks.filter(function (c) { return c !== cb; });
|
||||
};
|
||||
};
|
||||
/**
|
||||
* Allows callbacks to be registered that are executed when the store is
|
||||
* cleared. `onClearStore` returns an unsubscribe function that can be used
|
||||
* to remove registered callbacks.
|
||||
*/
|
||||
ApolloClient.prototype.onClearStore = function (cb) {
|
||||
var _this = this;
|
||||
this.clearStoreCallbacks.push(cb);
|
||||
return function () {
|
||||
_this.clearStoreCallbacks = _this.clearStoreCallbacks.filter(function (c) { return c !== cb; });
|
||||
};
|
||||
};
|
||||
/**
|
||||
* Refetches all of your active queries.
|
||||
*
|
||||
* `reFetchObservableQueries()` is useful if you want to bring the client back to proper state in case of a network outage
|
||||
*
|
||||
* It is important to remember that `reFetchObservableQueries()` *will* refetch any active
|
||||
* queries. This means that any components that might be mounted will execute
|
||||
* their queries again using your network interface. If you do not want to
|
||||
* re-execute any queries then you should make sure to stop watching any
|
||||
* active queries.
|
||||
* Takes optional parameter `includeStandby` which will include queries in standby-mode when refetching.
|
||||
*/
|
||||
ApolloClient.prototype.reFetchObservableQueries = function (includeStandby) {
|
||||
return this.queryManager.reFetchObservableQueries(includeStandby);
|
||||
};
|
||||
/**
|
||||
* Refetches specified active queries. Similar to "reFetchObservableQueries()" but with a specific list of queries.
|
||||
*
|
||||
* `refetchQueries()` is useful for use cases to imperatively refresh a selection of queries.
|
||||
*
|
||||
* It is important to remember that `refetchQueries()` *will* refetch specified active
|
||||
* queries. This means that any components that might be mounted will execute
|
||||
* their queries again using your network interface. If you do not want to
|
||||
* re-execute any queries then you should make sure to stop watching any
|
||||
* active queries.
|
||||
*/
|
||||
ApolloClient.prototype.refetchQueries = function (options) {
|
||||
var map = this.queryManager.refetchQueries(options);
|
||||
var queries = [];
|
||||
var results = [];
|
||||
map.forEach(function (result, obsQuery) {
|
||||
queries.push(obsQuery);
|
||||
results.push(result);
|
||||
});
|
||||
var result = Promise.all(results);
|
||||
// In case you need the raw results immediately, without awaiting
|
||||
// Promise.all(results):
|
||||
result.queries = queries;
|
||||
result.results = results;
|
||||
// If you decide to ignore the result Promise because you're using
|
||||
// result.queries and result.results instead, you shouldn't have to worry
|
||||
// about preventing uncaught rejections for the Promise.all result.
|
||||
result.catch(function (error) {
|
||||
globalThis.__DEV__ !== false && invariant.debug(17, error);
|
||||
});
|
||||
return result;
|
||||
};
|
||||
/**
|
||||
* Get all currently active `ObservableQuery` objects, in a `Map` keyed by
|
||||
* query ID strings.
|
||||
*
|
||||
* An "active" query is one that has observers and a `fetchPolicy` other than
|
||||
* "standby" or "cache-only".
|
||||
*
|
||||
* You can include all `ObservableQuery` objects (including the inactive ones)
|
||||
* by passing "all" instead of "active", or you can include just a subset of
|
||||
* active queries by passing an array of query names or DocumentNode objects.
|
||||
*/
|
||||
ApolloClient.prototype.getObservableQueries = function (include) {
|
||||
if (include === void 0) { include = "active"; }
|
||||
return this.queryManager.getObservableQueries(include);
|
||||
};
|
||||
/**
|
||||
* Exposes the cache's complete state, in a serializable format for later restoration.
|
||||
*/
|
||||
ApolloClient.prototype.extract = function (optimistic) {
|
||||
return this.cache.extract(optimistic);
|
||||
};
|
||||
/**
|
||||
* Replaces existing state in the cache (if any) with the values expressed by
|
||||
* `serializedState`.
|
||||
*
|
||||
* Called when hydrating a cache (server side rendering, or offline storage),
|
||||
* and also (potentially) during hot reloads.
|
||||
*/
|
||||
ApolloClient.prototype.restore = function (serializedState) {
|
||||
return this.cache.restore(serializedState);
|
||||
};
|
||||
/**
|
||||
* Add additional local resolvers.
|
||||
*/
|
||||
ApolloClient.prototype.addResolvers = function (resolvers) {
|
||||
this.localState.addResolvers(resolvers);
|
||||
};
|
||||
/**
|
||||
* Set (override existing) local resolvers.
|
||||
*/
|
||||
ApolloClient.prototype.setResolvers = function (resolvers) {
|
||||
this.localState.setResolvers(resolvers);
|
||||
};
|
||||
/**
|
||||
* Get all registered local resolvers.
|
||||
*/
|
||||
ApolloClient.prototype.getResolvers = function () {
|
||||
return this.localState.getResolvers();
|
||||
};
|
||||
/**
|
||||
* Set a custom local state fragment matcher.
|
||||
*/
|
||||
ApolloClient.prototype.setLocalStateFragmentMatcher = function (fragmentMatcher) {
|
||||
this.localState.setFragmentMatcher(fragmentMatcher);
|
||||
};
|
||||
/**
|
||||
* Define a new ApolloLink (or link chain) that Apollo Client will use.
|
||||
*/
|
||||
ApolloClient.prototype.setLink = function (newLink) {
|
||||
this.link = this.queryManager.link = newLink;
|
||||
};
|
||||
Object.defineProperty(ApolloClient.prototype, "defaultContext", {
|
||||
get: function () {
|
||||
return this.queryManager.defaultContext;
|
||||
},
|
||||
enumerable: false,
|
||||
configurable: true
|
||||
});
|
||||
return ApolloClient;
|
||||
}());
|
||||
export { ApolloClient };
|
||||
if (globalThis.__DEV__ !== false) {
|
||||
ApolloClient.prototype.getMemoryInternals = getApolloClientMemoryInternals;
|
||||
}
|
||||
//# sourceMappingURL=ApolloClient.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/core/ApolloClient.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/ApolloClient.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
65
graphql-subscription/node_modules/@apollo/client/core/LocalState.d.ts
generated
vendored
Normal file
65
graphql-subscription/node_modules/@apollo/client/core/LocalState.d.ts
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
import type { DocumentNode, SelectionNode, FieldNode, ASTNode } from "graphql";
|
||||
import type { ApolloCache } from "../cache/index.js";
|
||||
import type { FragmentMap, StoreObject } from "../utilities/index.js";
|
||||
import type { ApolloClient } from "./ApolloClient.js";
|
||||
import type { Resolvers, OperationVariables } from "./types.js";
|
||||
import type { FetchResult } from "../link/core/index.js";
|
||||
export type Resolver = (rootValue?: any, args?: any, context?: any, info?: {
|
||||
field: FieldNode;
|
||||
fragmentMap: FragmentMap;
|
||||
}) => any;
|
||||
export type VariableMap = {
|
||||
[name: string]: any;
|
||||
};
|
||||
export type FragmentMatcher = (rootValue: any, typeCondition: string, context: any) => boolean;
|
||||
export type ExecContext = {
|
||||
fragmentMap: FragmentMap;
|
||||
context: any;
|
||||
variables: VariableMap;
|
||||
fragmentMatcher: FragmentMatcher;
|
||||
defaultOperationType: string;
|
||||
exportedVariables: Record<string, any>;
|
||||
onlyRunForcedResolvers: boolean;
|
||||
selectionsToResolve: Set<SelectionNode>;
|
||||
};
|
||||
export type LocalStateOptions<TCacheShape> = {
|
||||
cache: ApolloCache<TCacheShape>;
|
||||
client?: ApolloClient<TCacheShape>;
|
||||
resolvers?: Resolvers | Resolvers[];
|
||||
fragmentMatcher?: FragmentMatcher;
|
||||
};
|
||||
export declare class LocalState<TCacheShape> {
|
||||
private cache;
|
||||
private client?;
|
||||
private resolvers?;
|
||||
private fragmentMatcher?;
|
||||
private selectionsToResolveCache;
|
||||
constructor({ cache, client, resolvers, fragmentMatcher, }: LocalStateOptions<TCacheShape>);
|
||||
addResolvers(resolvers: Resolvers | Resolvers[]): void;
|
||||
setResolvers(resolvers: Resolvers | Resolvers[]): void;
|
||||
getResolvers(): Resolvers;
|
||||
runResolvers<TData>({ document, remoteResult, context, variables, onlyRunForcedResolvers, }: {
|
||||
document: DocumentNode | null;
|
||||
remoteResult: FetchResult<TData>;
|
||||
context?: Record<string, any>;
|
||||
variables?: Record<string, any>;
|
||||
onlyRunForcedResolvers?: boolean;
|
||||
}): Promise<FetchResult<TData>>;
|
||||
setFragmentMatcher(fragmentMatcher: FragmentMatcher): void;
|
||||
getFragmentMatcher(): FragmentMatcher | undefined;
|
||||
clientQuery(document: DocumentNode): DocumentNode | null;
|
||||
serverQuery(document: DocumentNode): DocumentNode | null;
|
||||
prepareContext(context?: Record<string, any>): {
|
||||
cache: ApolloCache<TCacheShape>;
|
||||
getCacheKey(obj: StoreObject): string | undefined;
|
||||
};
|
||||
addExportedVariables<TVars extends OperationVariables>(document: DocumentNode, variables?: TVars, context?: {}): Promise<TVars>;
|
||||
shouldForceResolvers(document: ASTNode): boolean;
|
||||
private buildRootValueFromCache;
|
||||
private resolveDocument;
|
||||
private resolveSelectionSet;
|
||||
private resolveField;
|
||||
private resolveSubSelectedArray;
|
||||
private collectSelectionsToResolve;
|
||||
}
|
||||
//# sourceMappingURL=LocalState.d.ts.map
|
||||
356
graphql-subscription/node_modules/@apollo/client/core/LocalState.js
generated
vendored
Normal file
356
graphql-subscription/node_modules/@apollo/client/core/LocalState.js
generated
vendored
Normal file
@@ -0,0 +1,356 @@
|
||||
import { __assign, __awaiter, __generator } from "tslib";
|
||||
import { invariant } from "../utilities/globals/index.js";
|
||||
import { visit, BREAK, isSelectionNode } from "graphql";
|
||||
import { argumentsObjectFromField, buildQueryFromSelectionSet, createFragmentMap, getFragmentDefinitions, getMainDefinition, hasDirectives, isField, isInlineFragment, mergeDeep, mergeDeepArray, removeClientSetsFromDocument, resultKeyNameFromField, shouldInclude, } from "../utilities/index.js";
|
||||
import { cacheSlot } from "../cache/index.js";
|
||||
var LocalState = /** @class */ (function () {
|
||||
function LocalState(_a) {
|
||||
var cache = _a.cache, client = _a.client, resolvers = _a.resolvers, fragmentMatcher = _a.fragmentMatcher;
|
||||
this.selectionsToResolveCache = new WeakMap();
|
||||
this.cache = cache;
|
||||
if (client) {
|
||||
this.client = client;
|
||||
}
|
||||
if (resolvers) {
|
||||
this.addResolvers(resolvers);
|
||||
}
|
||||
if (fragmentMatcher) {
|
||||
this.setFragmentMatcher(fragmentMatcher);
|
||||
}
|
||||
}
|
||||
LocalState.prototype.addResolvers = function (resolvers) {
|
||||
var _this = this;
|
||||
this.resolvers = this.resolvers || {};
|
||||
if (Array.isArray(resolvers)) {
|
||||
resolvers.forEach(function (resolverGroup) {
|
||||
_this.resolvers = mergeDeep(_this.resolvers, resolverGroup);
|
||||
});
|
||||
}
|
||||
else {
|
||||
this.resolvers = mergeDeep(this.resolvers, resolvers);
|
||||
}
|
||||
};
|
||||
LocalState.prototype.setResolvers = function (resolvers) {
|
||||
this.resolvers = {};
|
||||
this.addResolvers(resolvers);
|
||||
};
|
||||
LocalState.prototype.getResolvers = function () {
|
||||
return this.resolvers || {};
|
||||
};
|
||||
// Run local client resolvers against the incoming query and remote data.
|
||||
// Locally resolved field values are merged with the incoming remote data,
|
||||
// and returned. Note that locally resolved fields will overwrite
|
||||
// remote data using the same field name.
|
||||
LocalState.prototype.runResolvers = function (_a) {
|
||||
var document = _a.document, remoteResult = _a.remoteResult, context = _a.context, variables = _a.variables, _b = _a.onlyRunForcedResolvers, onlyRunForcedResolvers = _b === void 0 ? false : _b;
|
||||
return __awaiter(this, void 0, void 0, function () {
|
||||
return __generator(this, function (_c) {
|
||||
if (document) {
|
||||
return [2 /*return*/, this.resolveDocument(document, remoteResult.data, context, variables, this.fragmentMatcher, onlyRunForcedResolvers).then(function (localResult) { return (__assign(__assign({}, remoteResult), { data: localResult.result })); })];
|
||||
}
|
||||
return [2 /*return*/, remoteResult];
|
||||
});
|
||||
});
|
||||
};
|
||||
LocalState.prototype.setFragmentMatcher = function (fragmentMatcher) {
|
||||
this.fragmentMatcher = fragmentMatcher;
|
||||
};
|
||||
LocalState.prototype.getFragmentMatcher = function () {
|
||||
return this.fragmentMatcher;
|
||||
};
|
||||
// Client queries contain everything in the incoming document (if a @client
|
||||
// directive is found).
|
||||
LocalState.prototype.clientQuery = function (document) {
|
||||
if (hasDirectives(["client"], document)) {
|
||||
if (this.resolvers) {
|
||||
return document;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
};
|
||||
// Server queries are stripped of all @client based selection sets.
|
||||
LocalState.prototype.serverQuery = function (document) {
|
||||
return removeClientSetsFromDocument(document);
|
||||
};
|
||||
LocalState.prototype.prepareContext = function (context) {
|
||||
var cache = this.cache;
|
||||
return __assign(__assign({}, context), { cache: cache,
|
||||
// Getting an entry's cache key is useful for local state resolvers.
|
||||
getCacheKey: function (obj) {
|
||||
return cache.identify(obj);
|
||||
} });
|
||||
};
|
||||
// To support `@client @export(as: "someVar")` syntax, we'll first resolve
|
||||
// @client @export fields locally, then pass the resolved values back to be
|
||||
// used alongside the original operation variables.
|
||||
LocalState.prototype.addExportedVariables = function (document, variables, context) {
|
||||
if (variables === void 0) { variables = {}; }
|
||||
if (context === void 0) { context = {}; }
|
||||
return __awaiter(this, void 0, void 0, function () {
|
||||
return __generator(this, function (_a) {
|
||||
if (document) {
|
||||
return [2 /*return*/, this.resolveDocument(document, this.buildRootValueFromCache(document, variables) || {}, this.prepareContext(context), variables).then(function (data) { return (__assign(__assign({}, variables), data.exportedVariables)); })];
|
||||
}
|
||||
return [2 /*return*/, __assign({}, variables)];
|
||||
});
|
||||
});
|
||||
};
|
||||
LocalState.prototype.shouldForceResolvers = function (document) {
|
||||
var forceResolvers = false;
|
||||
visit(document, {
|
||||
Directive: {
|
||||
enter: function (node) {
|
||||
if (node.name.value === "client" && node.arguments) {
|
||||
forceResolvers = node.arguments.some(function (arg) {
|
||||
return arg.name.value === "always" &&
|
||||
arg.value.kind === "BooleanValue" &&
|
||||
arg.value.value === true;
|
||||
});
|
||||
if (forceResolvers) {
|
||||
return BREAK;
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
});
|
||||
return forceResolvers;
|
||||
};
|
||||
// Query the cache and return matching data.
|
||||
LocalState.prototype.buildRootValueFromCache = function (document, variables) {
|
||||
return this.cache.diff({
|
||||
query: buildQueryFromSelectionSet(document),
|
||||
variables: variables,
|
||||
returnPartialData: true,
|
||||
optimistic: false,
|
||||
}).result;
|
||||
};
|
||||
LocalState.prototype.resolveDocument = function (document, rootValue, context, variables, fragmentMatcher, onlyRunForcedResolvers) {
|
||||
if (context === void 0) { context = {}; }
|
||||
if (variables === void 0) { variables = {}; }
|
||||
if (fragmentMatcher === void 0) { fragmentMatcher = function () { return true; }; }
|
||||
if (onlyRunForcedResolvers === void 0) { onlyRunForcedResolvers = false; }
|
||||
return __awaiter(this, void 0, void 0, function () {
|
||||
var mainDefinition, fragments, fragmentMap, selectionsToResolve, definitionOperation, defaultOperationType, _a, cache, client, execContext, isClientFieldDescendant;
|
||||
return __generator(this, function (_b) {
|
||||
mainDefinition = getMainDefinition(document);
|
||||
fragments = getFragmentDefinitions(document);
|
||||
fragmentMap = createFragmentMap(fragments);
|
||||
selectionsToResolve = this.collectSelectionsToResolve(mainDefinition, fragmentMap);
|
||||
definitionOperation = mainDefinition.operation;
|
||||
defaultOperationType = definitionOperation ?
|
||||
definitionOperation.charAt(0).toUpperCase() +
|
||||
definitionOperation.slice(1)
|
||||
: "Query";
|
||||
_a = this, cache = _a.cache, client = _a.client;
|
||||
execContext = {
|
||||
fragmentMap: fragmentMap,
|
||||
context: __assign(__assign({}, context), { cache: cache, client: client }),
|
||||
variables: variables,
|
||||
fragmentMatcher: fragmentMatcher,
|
||||
defaultOperationType: defaultOperationType,
|
||||
exportedVariables: {},
|
||||
selectionsToResolve: selectionsToResolve,
|
||||
onlyRunForcedResolvers: onlyRunForcedResolvers,
|
||||
};
|
||||
isClientFieldDescendant = false;
|
||||
return [2 /*return*/, this.resolveSelectionSet(mainDefinition.selectionSet, isClientFieldDescendant, rootValue, execContext).then(function (result) { return ({
|
||||
result: result,
|
||||
exportedVariables: execContext.exportedVariables,
|
||||
}); })];
|
||||
});
|
||||
});
|
||||
};
|
||||
LocalState.prototype.resolveSelectionSet = function (selectionSet, isClientFieldDescendant, rootValue, execContext) {
|
||||
return __awaiter(this, void 0, void 0, function () {
|
||||
var fragmentMap, context, variables, resultsToMerge, execute;
|
||||
var _this = this;
|
||||
return __generator(this, function (_a) {
|
||||
fragmentMap = execContext.fragmentMap, context = execContext.context, variables = execContext.variables;
|
||||
resultsToMerge = [rootValue];
|
||||
execute = function (selection) { return __awaiter(_this, void 0, void 0, function () {
|
||||
var fragment, typeCondition;
|
||||
return __generator(this, function (_a) {
|
||||
if (!isClientFieldDescendant &&
|
||||
!execContext.selectionsToResolve.has(selection)) {
|
||||
// Skip selections without @client directives
|
||||
// (still processing if one of the ancestors or one of the child fields has @client directive)
|
||||
return [2 /*return*/];
|
||||
}
|
||||
if (!shouldInclude(selection, variables)) {
|
||||
// Skip this entirely.
|
||||
return [2 /*return*/];
|
||||
}
|
||||
if (isField(selection)) {
|
||||
return [2 /*return*/, this.resolveField(selection, isClientFieldDescendant, rootValue, execContext).then(function (fieldResult) {
|
||||
var _a;
|
||||
if (typeof fieldResult !== "undefined") {
|
||||
resultsToMerge.push((_a = {},
|
||||
_a[resultKeyNameFromField(selection)] = fieldResult,
|
||||
_a));
|
||||
}
|
||||
})];
|
||||
}
|
||||
if (isInlineFragment(selection)) {
|
||||
fragment = selection;
|
||||
}
|
||||
else {
|
||||
// This is a named fragment.
|
||||
fragment = fragmentMap[selection.name.value];
|
||||
invariant(fragment, 18, selection.name.value);
|
||||
}
|
||||
if (fragment && fragment.typeCondition) {
|
||||
typeCondition = fragment.typeCondition.name.value;
|
||||
if (execContext.fragmentMatcher(rootValue, typeCondition, context)) {
|
||||
return [2 /*return*/, this.resolveSelectionSet(fragment.selectionSet, isClientFieldDescendant, rootValue, execContext).then(function (fragmentResult) {
|
||||
resultsToMerge.push(fragmentResult);
|
||||
})];
|
||||
}
|
||||
}
|
||||
return [2 /*return*/];
|
||||
});
|
||||
}); };
|
||||
return [2 /*return*/, Promise.all(selectionSet.selections.map(execute)).then(function () {
|
||||
return mergeDeepArray(resultsToMerge);
|
||||
})];
|
||||
});
|
||||
});
|
||||
};
|
||||
LocalState.prototype.resolveField = function (field, isClientFieldDescendant, rootValue, execContext) {
|
||||
return __awaiter(this, void 0, void 0, function () {
|
||||
var variables, fieldName, aliasedFieldName, aliasUsed, defaultResult, resultPromise, resolverType, resolverMap, resolve;
|
||||
var _this = this;
|
||||
return __generator(this, function (_a) {
|
||||
if (!rootValue) {
|
||||
return [2 /*return*/, null];
|
||||
}
|
||||
variables = execContext.variables;
|
||||
fieldName = field.name.value;
|
||||
aliasedFieldName = resultKeyNameFromField(field);
|
||||
aliasUsed = fieldName !== aliasedFieldName;
|
||||
defaultResult = rootValue[aliasedFieldName] || rootValue[fieldName];
|
||||
resultPromise = Promise.resolve(defaultResult);
|
||||
// Usually all local resolvers are run when passing through here, but
|
||||
// if we've specifically identified that we only want to run forced
|
||||
// resolvers (that is, resolvers for fields marked with
|
||||
// `@client(always: true)`), then we'll skip running non-forced resolvers.
|
||||
if (!execContext.onlyRunForcedResolvers ||
|
||||
this.shouldForceResolvers(field)) {
|
||||
resolverType = rootValue.__typename || execContext.defaultOperationType;
|
||||
resolverMap = this.resolvers && this.resolvers[resolverType];
|
||||
if (resolverMap) {
|
||||
resolve = resolverMap[aliasUsed ? fieldName : aliasedFieldName];
|
||||
if (resolve) {
|
||||
resultPromise = Promise.resolve(
|
||||
// In case the resolve function accesses reactive variables,
|
||||
// set cacheSlot to the current cache instance.
|
||||
cacheSlot.withValue(this.cache, resolve, [
|
||||
rootValue,
|
||||
argumentsObjectFromField(field, variables),
|
||||
execContext.context,
|
||||
{ field: field, fragmentMap: execContext.fragmentMap },
|
||||
]));
|
||||
}
|
||||
}
|
||||
}
|
||||
return [2 /*return*/, resultPromise.then(function (result) {
|
||||
var _a, _b;
|
||||
if (result === void 0) { result = defaultResult; }
|
||||
// If an @export directive is associated with the current field, store
|
||||
// the `as` export variable name and current result for later use.
|
||||
if (field.directives) {
|
||||
field.directives.forEach(function (directive) {
|
||||
if (directive.name.value === "export" && directive.arguments) {
|
||||
directive.arguments.forEach(function (arg) {
|
||||
if (arg.name.value === "as" && arg.value.kind === "StringValue") {
|
||||
execContext.exportedVariables[arg.value.value] = result;
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
// Handle all scalar types here.
|
||||
if (!field.selectionSet) {
|
||||
return result;
|
||||
}
|
||||
// From here down, the field has a selection set, which means it's trying
|
||||
// to query a GraphQLObjectType.
|
||||
if (result == null) {
|
||||
// Basically any field in a GraphQL response can be null, or missing
|
||||
return result;
|
||||
}
|
||||
var isClientField = (_b = (_a = field.directives) === null || _a === void 0 ? void 0 : _a.some(function (d) { return d.name.value === "client"; })) !== null && _b !== void 0 ? _b : false;
|
||||
if (Array.isArray(result)) {
|
||||
return _this.resolveSubSelectedArray(field, isClientFieldDescendant || isClientField, result, execContext);
|
||||
}
|
||||
// Returned value is an object, and the query has a sub-selection. Recurse.
|
||||
if (field.selectionSet) {
|
||||
return _this.resolveSelectionSet(field.selectionSet, isClientFieldDescendant || isClientField, result, execContext);
|
||||
}
|
||||
})];
|
||||
});
|
||||
});
|
||||
};
|
||||
LocalState.prototype.resolveSubSelectedArray = function (field, isClientFieldDescendant, result, execContext) {
|
||||
var _this = this;
|
||||
return Promise.all(result.map(function (item) {
|
||||
if (item === null) {
|
||||
return null;
|
||||
}
|
||||
// This is a nested array, recurse.
|
||||
if (Array.isArray(item)) {
|
||||
return _this.resolveSubSelectedArray(field, isClientFieldDescendant, item, execContext);
|
||||
}
|
||||
// This is an object, run the selection set on it.
|
||||
if (field.selectionSet) {
|
||||
return _this.resolveSelectionSet(field.selectionSet, isClientFieldDescendant, item, execContext);
|
||||
}
|
||||
}));
|
||||
};
|
||||
// Collect selection nodes on paths from document root down to all @client directives.
|
||||
// This function takes into account transitive fragment spreads.
|
||||
// Complexity equals to a single `visit` over the full document.
|
||||
LocalState.prototype.collectSelectionsToResolve = function (mainDefinition, fragmentMap) {
|
||||
var isSingleASTNode = function (node) { return !Array.isArray(node); };
|
||||
var selectionsToResolveCache = this.selectionsToResolveCache;
|
||||
function collectByDefinition(definitionNode) {
|
||||
if (!selectionsToResolveCache.has(definitionNode)) {
|
||||
var matches_1 = new Set();
|
||||
selectionsToResolveCache.set(definitionNode, matches_1);
|
||||
visit(definitionNode, {
|
||||
Directive: function (node, _, __, ___, ancestors) {
|
||||
if (node.name.value === "client") {
|
||||
ancestors.forEach(function (node) {
|
||||
if (isSingleASTNode(node) && isSelectionNode(node)) {
|
||||
matches_1.add(node);
|
||||
}
|
||||
});
|
||||
}
|
||||
},
|
||||
FragmentSpread: function (spread, _, __, ___, ancestors) {
|
||||
var fragment = fragmentMap[spread.name.value];
|
||||
invariant(fragment, 19, spread.name.value);
|
||||
var fragmentSelections = collectByDefinition(fragment);
|
||||
if (fragmentSelections.size > 0) {
|
||||
// Fragment for this spread contains @client directive (either directly or transitively)
|
||||
// Collect selection nodes on paths from the root down to fields with the @client directive
|
||||
ancestors.forEach(function (node) {
|
||||
if (isSingleASTNode(node) && isSelectionNode(node)) {
|
||||
matches_1.add(node);
|
||||
}
|
||||
});
|
||||
matches_1.add(spread);
|
||||
fragmentSelections.forEach(function (selection) {
|
||||
matches_1.add(selection);
|
||||
});
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
return selectionsToResolveCache.get(definitionNode);
|
||||
}
|
||||
return collectByDefinition(mainDefinition);
|
||||
};
|
||||
return LocalState;
|
||||
}());
|
||||
export { LocalState };
|
||||
//# sourceMappingURL=LocalState.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/core/LocalState.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/LocalState.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
130
graphql-subscription/node_modules/@apollo/client/core/ObservableQuery.d.ts
generated
vendored
Normal file
130
graphql-subscription/node_modules/@apollo/client/core/ObservableQuery.d.ts
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
import { NetworkStatus } from "./networkStatus.js";
|
||||
import type { Concast, Observer, ObservableSubscription } from "../utilities/index.js";
|
||||
import { Observable } from "../utilities/index.js";
|
||||
import type { ApolloError } from "../errors/index.js";
|
||||
import type { QueryManager } from "./QueryManager.js";
|
||||
import type { ApolloQueryResult, OperationVariables, TypedDocumentNode } from "./types.js";
|
||||
import type { WatchQueryOptions, FetchMoreQueryOptions, SubscribeToMoreOptions } from "./watchQueryOptions.js";
|
||||
import type { QueryInfo } from "./QueryInfo.js";
|
||||
import type { MissingFieldError } from "../cache/index.js";
|
||||
import type { MissingTree } from "../cache/core/types/common.js";
|
||||
export interface FetchMoreOptions<TData = any, TVariables = OperationVariables> {
|
||||
updateQuery?: (previousQueryResult: TData, options: {
|
||||
fetchMoreResult?: TData;
|
||||
variables?: TVariables;
|
||||
}) => TData;
|
||||
}
|
||||
export interface UpdateQueryOptions<TVariables> {
|
||||
variables?: TVariables;
|
||||
}
|
||||
export declare class ObservableQuery<TData = any, TVariables extends OperationVariables = OperationVariables> extends Observable<ApolloQueryResult<TData>> {
|
||||
readonly options: WatchQueryOptions<TVariables, TData>;
|
||||
readonly queryId: string;
|
||||
readonly queryName?: string;
|
||||
get query(): TypedDocumentNode<TData, TVariables>;
|
||||
/**
|
||||
* An object containing the variables that were provided for the query.
|
||||
*/
|
||||
get variables(): TVariables | undefined;
|
||||
private isTornDown;
|
||||
private queryManager;
|
||||
private observers;
|
||||
private subscriptions;
|
||||
private waitForOwnResult;
|
||||
private last?;
|
||||
private lastQuery?;
|
||||
private queryInfo;
|
||||
private concast?;
|
||||
private observer?;
|
||||
private pollingInfo?;
|
||||
constructor({ queryManager, queryInfo, options, }: {
|
||||
queryManager: QueryManager<any>;
|
||||
queryInfo: QueryInfo;
|
||||
options: WatchQueryOptions<TVariables, TData>;
|
||||
});
|
||||
result(): Promise<ApolloQueryResult<TData>>;
|
||||
/** @internal */
|
||||
resetDiff(): void;
|
||||
getCurrentResult(saveAsLastResult?: boolean): ApolloQueryResult<TData>;
|
||||
isDifferentFromLastResult(newResult: ApolloQueryResult<TData>, variables?: TVariables): boolean | undefined;
|
||||
private getLast;
|
||||
getLastResult(variablesMustMatch?: boolean): ApolloQueryResult<TData> | undefined;
|
||||
getLastError(variablesMustMatch?: boolean): ApolloError | undefined;
|
||||
resetLastResults(): void;
|
||||
resetQueryStoreErrors(): void;
|
||||
/**
|
||||
* Update the variables of this observable query, and fetch the new results.
|
||||
* This method should be preferred over `setVariables` in most use cases.
|
||||
*
|
||||
* @param variables - The new set of variables. If there are missing variables,
|
||||
* the previous values of those variables will be used.
|
||||
*/
|
||||
refetch(variables?: Partial<TVariables>): Promise<ApolloQueryResult<TData>>;
|
||||
/**
|
||||
* A function that helps you fetch the next set of results for a [paginated list field](https://www.apollographql.com/docs/react/pagination/core-api/).
|
||||
*/
|
||||
fetchMore<TFetchData = TData, TFetchVars extends OperationVariables = TVariables>(fetchMoreOptions: FetchMoreQueryOptions<TFetchVars, TFetchData> & {
|
||||
updateQuery?: (previousQueryResult: TData, options: {
|
||||
fetchMoreResult: TFetchData;
|
||||
variables: TFetchVars;
|
||||
}) => TData;
|
||||
}): Promise<ApolloQueryResult<TFetchData>>;
|
||||
/**
|
||||
* A function that enables you to execute a [subscription](https://www.apollographql.com/docs/react/data/subscriptions/), usually to subscribe to specific fields that were included in the query.
|
||||
*
|
||||
* This function returns _another_ function that you can call to terminate the subscription.
|
||||
*/
|
||||
subscribeToMore<TSubscriptionData = TData, TSubscriptionVariables extends OperationVariables = TVariables>(options: SubscribeToMoreOptions<TData, TSubscriptionVariables, TSubscriptionData>): () => void;
|
||||
setOptions(newOptions: Partial<WatchQueryOptions<TVariables, TData>>): Promise<ApolloQueryResult<TData>>;
|
||||
silentSetOptions(newOptions: Partial<WatchQueryOptions<TVariables, TData>>): void;
|
||||
/**
|
||||
* Update the variables of this observable query, and fetch the new results
|
||||
* if they've changed. Most users should prefer `refetch` instead of
|
||||
* `setVariables` in order to to be properly notified of results even when
|
||||
* they come from the cache.
|
||||
*
|
||||
* Note: the `next` callback will *not* fire if the variables have not changed
|
||||
* or if the result is coming from cache.
|
||||
*
|
||||
* Note: the promise will return the old results immediately if the variables
|
||||
* have not changed.
|
||||
*
|
||||
* Note: the promise will return null immediately if the query is not active
|
||||
* (there are no subscribers).
|
||||
*
|
||||
* @param variables - The new set of variables. If there are missing variables,
|
||||
* the previous values of those variables will be used.
|
||||
*/
|
||||
setVariables(variables: TVariables): Promise<ApolloQueryResult<TData> | void>;
|
||||
/**
|
||||
* A function that enables you to update the query's cached result without executing a followup GraphQL operation.
|
||||
*
|
||||
* See [using updateQuery and updateFragment](https://www.apollographql.com/docs/react/caching/cache-interaction/#using-updatequery-and-updatefragment) for additional information.
|
||||
*/
|
||||
updateQuery<TVars extends OperationVariables = TVariables>(mapFn: (previousQueryResult: TData, options: Pick<WatchQueryOptions<TVars, TData>, "variables">) => TData): void;
|
||||
/**
|
||||
* A function that instructs the query to begin re-executing at a specified interval (in milliseconds).
|
||||
*/
|
||||
startPolling(pollInterval: number): void;
|
||||
/**
|
||||
* A function that instructs the query to stop polling after a previous call to `startPolling`.
|
||||
*/
|
||||
stopPolling(): void;
|
||||
private applyNextFetchPolicy;
|
||||
private fetch;
|
||||
private updatePolling;
|
||||
private updateLastResult;
|
||||
reobserveAsConcast(newOptions?: Partial<WatchQueryOptions<TVariables, TData>>, newNetworkStatus?: NetworkStatus): Concast<ApolloQueryResult<TData>>;
|
||||
reobserve(newOptions?: Partial<WatchQueryOptions<TVariables, TData>>, newNetworkStatus?: NetworkStatus): Promise<ApolloQueryResult<TData>>;
|
||||
resubscribeAfterError(onNext: (value: ApolloQueryResult<TData>) => void, onError?: (error: any) => void, onComplete?: () => void): ObservableSubscription;
|
||||
resubscribeAfterError(observer: Observer<ApolloQueryResult<TData>>): ObservableSubscription;
|
||||
private observe;
|
||||
private reportResult;
|
||||
private reportError;
|
||||
hasObservers(): boolean;
|
||||
private tearDownQuery;
|
||||
private transformDocument;
|
||||
}
|
||||
export declare function reobserveCacheFirst<TData, TVars extends OperationVariables>(obsQuery: ObservableQuery<TData, TVars>): Promise<ApolloQueryResult<TData>>;
|
||||
export declare function logMissingFieldErrors(missing: MissingFieldError[] | MissingTree | undefined): void;
|
||||
//# sourceMappingURL=ObservableQuery.d.ts.map
|
||||
803
graphql-subscription/node_modules/@apollo/client/core/ObservableQuery.js
generated
vendored
Normal file
803
graphql-subscription/node_modules/@apollo/client/core/ObservableQuery.js
generated
vendored
Normal file
@@ -0,0 +1,803 @@
|
||||
import { __assign, __extends } from "tslib";
|
||||
import { invariant } from "../utilities/globals/index.js";
|
||||
import { equal } from "@wry/equality";
|
||||
import { NetworkStatus, isNetworkRequestInFlight } from "./networkStatus.js";
|
||||
import { cloneDeep, compact, getOperationDefinition, Observable, iterateObserversSafely, fixObservableSubclass, getQueryDefinition, } from "../utilities/index.js";
|
||||
import { equalByQuery } from "./equalByQuery.js";
|
||||
var assign = Object.assign, hasOwnProperty = Object.hasOwnProperty;
|
||||
var ObservableQuery = /** @class */ (function (_super) {
|
||||
__extends(ObservableQuery, _super);
|
||||
function ObservableQuery(_a) {
|
||||
var queryManager = _a.queryManager, queryInfo = _a.queryInfo, options = _a.options;
|
||||
var _this = _super.call(this, function (observer) {
|
||||
// Zen Observable has its own error function, so in order to log correctly
|
||||
// we need to provide a custom error callback.
|
||||
try {
|
||||
var subObserver = observer._subscription._observer;
|
||||
if (subObserver && !subObserver.error) {
|
||||
subObserver.error = defaultSubscriptionObserverErrorCallback;
|
||||
}
|
||||
}
|
||||
catch (_a) { }
|
||||
var first = !_this.observers.size;
|
||||
_this.observers.add(observer);
|
||||
// Deliver most recent error or result.
|
||||
var last = _this.last;
|
||||
if (last && last.error) {
|
||||
observer.error && observer.error(last.error);
|
||||
}
|
||||
else if (last && last.result) {
|
||||
observer.next && observer.next(last.result);
|
||||
}
|
||||
// Initiate observation of this query if it hasn't been reported to
|
||||
// the QueryManager yet.
|
||||
if (first) {
|
||||
// Blindly catching here prevents unhandled promise rejections,
|
||||
// and is safe because the ObservableQuery handles this error with
|
||||
// this.observer.error, so we're not just swallowing the error by
|
||||
// ignoring it here.
|
||||
_this.reobserve().catch(function () { });
|
||||
}
|
||||
return function () {
|
||||
if (_this.observers.delete(observer) && !_this.observers.size) {
|
||||
_this.tearDownQuery();
|
||||
}
|
||||
};
|
||||
}) || this;
|
||||
_this.observers = new Set();
|
||||
_this.subscriptions = new Set();
|
||||
// related classes
|
||||
_this.queryInfo = queryInfo;
|
||||
_this.queryManager = queryManager;
|
||||
// active state
|
||||
_this.waitForOwnResult = skipCacheDataFor(options.fetchPolicy);
|
||||
_this.isTornDown = false;
|
||||
var _b = queryManager.defaultOptions.watchQuery, _c = _b === void 0 ? {} : _b, _d = _c.fetchPolicy, defaultFetchPolicy = _d === void 0 ? "cache-first" : _d;
|
||||
var _e = options.fetchPolicy, fetchPolicy = _e === void 0 ? defaultFetchPolicy : _e,
|
||||
// Make sure we don't store "standby" as the initialFetchPolicy.
|
||||
_f = options.initialFetchPolicy,
|
||||
// Make sure we don't store "standby" as the initialFetchPolicy.
|
||||
initialFetchPolicy = _f === void 0 ? fetchPolicy === "standby" ? defaultFetchPolicy : (fetchPolicy) : _f;
|
||||
_this.options = __assign(__assign({}, options), {
|
||||
// Remember the initial options.fetchPolicy so we can revert back to this
|
||||
// policy when variables change. This information can also be specified
|
||||
// (or overridden) by providing options.initialFetchPolicy explicitly.
|
||||
initialFetchPolicy: initialFetchPolicy,
|
||||
// This ensures this.options.fetchPolicy always has a string value, in
|
||||
// case options.fetchPolicy was not provided.
|
||||
fetchPolicy: fetchPolicy });
|
||||
_this.queryId = queryInfo.queryId || queryManager.generateQueryId();
|
||||
var opDef = getOperationDefinition(_this.query);
|
||||
_this.queryName = opDef && opDef.name && opDef.name.value;
|
||||
return _this;
|
||||
}
|
||||
Object.defineProperty(ObservableQuery.prototype, "query", {
|
||||
// The `query` computed property will always reflect the document transformed
|
||||
// by the last run query. `this.options.query` will always reflect the raw
|
||||
// untransformed query to ensure document transforms with runtime conditionals
|
||||
// are run on the original document.
|
||||
get: function () {
|
||||
return this.lastQuery || this.options.query;
|
||||
},
|
||||
enumerable: false,
|
||||
configurable: true
|
||||
});
|
||||
Object.defineProperty(ObservableQuery.prototype, "variables", {
|
||||
// Computed shorthand for this.options.variables, preserved for
|
||||
// backwards compatibility.
|
||||
/**
|
||||
* An object containing the variables that were provided for the query.
|
||||
*/
|
||||
get: function () {
|
||||
return this.options.variables;
|
||||
},
|
||||
enumerable: false,
|
||||
configurable: true
|
||||
});
|
||||
ObservableQuery.prototype.result = function () {
|
||||
var _this = this;
|
||||
return new Promise(function (resolve, reject) {
|
||||
// TODO: this code doesn’t actually make sense insofar as the observer
|
||||
// will never exist in this.observers due how zen-observable wraps observables.
|
||||
// https://github.com/zenparsing/zen-observable/blob/master/src/Observable.js#L169
|
||||
var observer = {
|
||||
next: function (result) {
|
||||
resolve(result);
|
||||
// Stop the query within the QueryManager if we can before
|
||||
// this function returns.
|
||||
//
|
||||
// We do this in order to prevent observers piling up within
|
||||
// the QueryManager. Notice that we only fully unsubscribe
|
||||
// from the subscription in a setTimeout(..., 0) call. This call can
|
||||
// actually be handled by the browser at a much later time. If queries
|
||||
// are fired in the meantime, observers that should have been removed
|
||||
// from the QueryManager will continue to fire, causing an unnecessary
|
||||
// performance hit.
|
||||
_this.observers.delete(observer);
|
||||
if (!_this.observers.size) {
|
||||
_this.queryManager.removeQuery(_this.queryId);
|
||||
}
|
||||
setTimeout(function () {
|
||||
subscription.unsubscribe();
|
||||
}, 0);
|
||||
},
|
||||
error: reject,
|
||||
};
|
||||
var subscription = _this.subscribe(observer);
|
||||
});
|
||||
};
|
||||
/** @internal */
|
||||
ObservableQuery.prototype.resetDiff = function () {
|
||||
this.queryInfo.resetDiff();
|
||||
};
|
||||
ObservableQuery.prototype.getCurrentResult = function (saveAsLastResult) {
|
||||
if (saveAsLastResult === void 0) { saveAsLastResult = true; }
|
||||
// Use the last result as long as the variables match this.variables.
|
||||
var lastResult = this.getLastResult(true);
|
||||
var networkStatus = this.queryInfo.networkStatus ||
|
||||
(lastResult && lastResult.networkStatus) ||
|
||||
NetworkStatus.ready;
|
||||
var result = __assign(__assign({}, lastResult), { loading: isNetworkRequestInFlight(networkStatus), networkStatus: networkStatus });
|
||||
var _a = this.options.fetchPolicy, fetchPolicy = _a === void 0 ? "cache-first" : _a;
|
||||
if (
|
||||
// These fetch policies should never deliver data from the cache, unless
|
||||
// redelivering a previously delivered result.
|
||||
skipCacheDataFor(fetchPolicy) ||
|
||||
// If this.options.query has @client(always: true) fields, we cannot
|
||||
// trust diff.result, since it was read from the cache without running
|
||||
// local resolvers (and it's too late to run resolvers now, since we must
|
||||
// return a result synchronously).
|
||||
this.queryManager.getDocumentInfo(this.query).hasForcedResolvers) {
|
||||
// Fall through.
|
||||
}
|
||||
else if (this.waitForOwnResult) {
|
||||
// This would usually be a part of `QueryInfo.getDiff()`.
|
||||
// which we skip in the waitForOwnResult case since we are not
|
||||
// interested in the diff.
|
||||
this.queryInfo["updateWatch"]();
|
||||
}
|
||||
else {
|
||||
var diff = this.queryInfo.getDiff();
|
||||
if (diff.complete || this.options.returnPartialData) {
|
||||
result.data = diff.result;
|
||||
}
|
||||
if (equal(result.data, {})) {
|
||||
result.data = void 0;
|
||||
}
|
||||
if (diff.complete) {
|
||||
// Similar to setting result.partial to false, but taking advantage of the
|
||||
// falsiness of missing fields.
|
||||
delete result.partial;
|
||||
// If the diff is complete, and we're using a FetchPolicy that
|
||||
// terminates after a complete cache read, we can assume the next result
|
||||
// we receive will have NetworkStatus.ready and !loading.
|
||||
if (diff.complete &&
|
||||
result.networkStatus === NetworkStatus.loading &&
|
||||
(fetchPolicy === "cache-first" || fetchPolicy === "cache-only")) {
|
||||
result.networkStatus = NetworkStatus.ready;
|
||||
result.loading = false;
|
||||
}
|
||||
}
|
||||
else {
|
||||
result.partial = true;
|
||||
}
|
||||
if (globalThis.__DEV__ !== false &&
|
||||
!diff.complete &&
|
||||
!this.options.partialRefetch &&
|
||||
!result.loading &&
|
||||
!result.data &&
|
||||
!result.error) {
|
||||
logMissingFieldErrors(diff.missing);
|
||||
}
|
||||
}
|
||||
if (saveAsLastResult) {
|
||||
this.updateLastResult(result);
|
||||
}
|
||||
return result;
|
||||
};
|
||||
// Compares newResult to the snapshot we took of this.lastResult when it was
|
||||
// first received.
|
||||
ObservableQuery.prototype.isDifferentFromLastResult = function (newResult, variables) {
|
||||
if (!this.last) {
|
||||
return true;
|
||||
}
|
||||
var resultIsDifferent = this.queryManager.getDocumentInfo(this.query).hasNonreactiveDirective ?
|
||||
!equalByQuery(this.query, this.last.result, newResult, this.variables)
|
||||
: !equal(this.last.result, newResult);
|
||||
return (resultIsDifferent || (variables && !equal(this.last.variables, variables)));
|
||||
};
|
||||
ObservableQuery.prototype.getLast = function (key, variablesMustMatch) {
|
||||
var last = this.last;
|
||||
if (last &&
|
||||
last[key] &&
|
||||
(!variablesMustMatch || equal(last.variables, this.variables))) {
|
||||
return last[key];
|
||||
}
|
||||
};
|
||||
ObservableQuery.prototype.getLastResult = function (variablesMustMatch) {
|
||||
return this.getLast("result", variablesMustMatch);
|
||||
};
|
||||
ObservableQuery.prototype.getLastError = function (variablesMustMatch) {
|
||||
return this.getLast("error", variablesMustMatch);
|
||||
};
|
||||
ObservableQuery.prototype.resetLastResults = function () {
|
||||
delete this.last;
|
||||
this.isTornDown = false;
|
||||
};
|
||||
ObservableQuery.prototype.resetQueryStoreErrors = function () {
|
||||
this.queryManager.resetErrors(this.queryId);
|
||||
};
|
||||
/**
|
||||
* Update the variables of this observable query, and fetch the new results.
|
||||
* This method should be preferred over `setVariables` in most use cases.
|
||||
*
|
||||
* @param variables - The new set of variables. If there are missing variables,
|
||||
* the previous values of those variables will be used.
|
||||
*/
|
||||
ObservableQuery.prototype.refetch = function (variables) {
|
||||
var _a;
|
||||
var reobserveOptions = {
|
||||
// Always disable polling for refetches.
|
||||
pollInterval: 0,
|
||||
};
|
||||
// Unless the provided fetchPolicy always consults the network
|
||||
// (no-cache, network-only, or cache-and-network), override it with
|
||||
// network-only to force the refetch for this fetchQuery call.
|
||||
var fetchPolicy = this.options.fetchPolicy;
|
||||
if (fetchPolicy === "cache-and-network") {
|
||||
reobserveOptions.fetchPolicy = fetchPolicy;
|
||||
}
|
||||
else if (fetchPolicy === "no-cache") {
|
||||
reobserveOptions.fetchPolicy = "no-cache";
|
||||
}
|
||||
else {
|
||||
reobserveOptions.fetchPolicy = "network-only";
|
||||
}
|
||||
if (globalThis.__DEV__ !== false && variables && hasOwnProperty.call(variables, "variables")) {
|
||||
var queryDef = getQueryDefinition(this.query);
|
||||
var vars = queryDef.variableDefinitions;
|
||||
if (!vars || !vars.some(function (v) { return v.variable.name.value === "variables"; })) {
|
||||
globalThis.__DEV__ !== false && invariant.warn(
|
||||
20,
|
||||
variables,
|
||||
((_a = queryDef.name) === null || _a === void 0 ? void 0 : _a.value) || queryDef
|
||||
);
|
||||
}
|
||||
}
|
||||
if (variables && !equal(this.options.variables, variables)) {
|
||||
// Update the existing options with new variables
|
||||
reobserveOptions.variables = this.options.variables = __assign(__assign({}, this.options.variables), variables);
|
||||
}
|
||||
this.queryInfo.resetLastWrite();
|
||||
return this.reobserve(reobserveOptions, NetworkStatus.refetch);
|
||||
};
|
||||
/**
|
||||
* A function that helps you fetch the next set of results for a [paginated list field](https://www.apollographql.com/docs/react/pagination/core-api/).
|
||||
*/
|
||||
ObservableQuery.prototype.fetchMore = function (fetchMoreOptions) {
|
||||
var _this = this;
|
||||
var combinedOptions = __assign(__assign({}, (fetchMoreOptions.query ? fetchMoreOptions : (__assign(__assign(__assign(__assign({}, this.options), { query: this.options.query }), fetchMoreOptions), { variables: __assign(__assign({}, this.options.variables), fetchMoreOptions.variables) })))), {
|
||||
// The fetchMore request goes immediately to the network and does
|
||||
// not automatically write its result to the cache (hence no-cache
|
||||
// instead of network-only), because we allow the caller of
|
||||
// fetchMore to provide an updateQuery callback that determines how
|
||||
// the data gets written to the cache.
|
||||
fetchPolicy: "no-cache" });
|
||||
combinedOptions.query = this.transformDocument(combinedOptions.query);
|
||||
var qid = this.queryManager.generateQueryId();
|
||||
// If a temporary query is passed to `fetchMore`, we don't want to store
|
||||
// it as the last query result since it may be an optimized query for
|
||||
// pagination. We will however run the transforms on the original document
|
||||
// as well as the document passed in `fetchMoreOptions` to ensure the cache
|
||||
// uses the most up-to-date document which may rely on runtime conditionals.
|
||||
this.lastQuery =
|
||||
fetchMoreOptions.query ?
|
||||
this.transformDocument(this.options.query)
|
||||
: combinedOptions.query;
|
||||
// Simulate a loading result for the original query with
|
||||
// result.networkStatus === NetworkStatus.fetchMore.
|
||||
var queryInfo = this.queryInfo;
|
||||
var originalNetworkStatus = queryInfo.networkStatus;
|
||||
queryInfo.networkStatus = NetworkStatus.fetchMore;
|
||||
if (combinedOptions.notifyOnNetworkStatusChange) {
|
||||
this.observe();
|
||||
}
|
||||
var updatedQuerySet = new Set();
|
||||
return this.queryManager
|
||||
.fetchQuery(qid, combinedOptions, NetworkStatus.fetchMore)
|
||||
.then(function (fetchMoreResult) {
|
||||
_this.queryManager.removeQuery(qid);
|
||||
if (queryInfo.networkStatus === NetworkStatus.fetchMore) {
|
||||
queryInfo.networkStatus = originalNetworkStatus;
|
||||
}
|
||||
// Performing this cache update inside a cache.batch transaction ensures
|
||||
// any affected cache.watch watchers are notified at most once about any
|
||||
// updates. Most watchers will be using the QueryInfo class, which
|
||||
// responds to notifications by calling reobserveCacheFirst to deliver
|
||||
// fetchMore cache results back to this ObservableQuery.
|
||||
_this.queryManager.cache.batch({
|
||||
update: function (cache) {
|
||||
var updateQuery = fetchMoreOptions.updateQuery;
|
||||
if (updateQuery) {
|
||||
cache.updateQuery({
|
||||
query: _this.query,
|
||||
variables: _this.variables,
|
||||
returnPartialData: true,
|
||||
optimistic: false,
|
||||
}, function (previous) {
|
||||
return updateQuery(previous, {
|
||||
fetchMoreResult: fetchMoreResult.data,
|
||||
variables: combinedOptions.variables,
|
||||
});
|
||||
});
|
||||
}
|
||||
else {
|
||||
// If we're using a field policy instead of updateQuery, the only
|
||||
// thing we need to do is write the new data to the cache using
|
||||
// combinedOptions.variables (instead of this.variables, which is
|
||||
// what this.updateQuery uses, because it works by abusing the
|
||||
// original field value, keyed by the original variables).
|
||||
cache.writeQuery({
|
||||
query: combinedOptions.query,
|
||||
variables: combinedOptions.variables,
|
||||
data: fetchMoreResult.data,
|
||||
});
|
||||
}
|
||||
},
|
||||
onWatchUpdated: function (watch) {
|
||||
// Record the DocumentNode associated with any watched query whose
|
||||
// data were updated by the cache writes above.
|
||||
updatedQuerySet.add(watch.query);
|
||||
},
|
||||
});
|
||||
return fetchMoreResult;
|
||||
})
|
||||
.finally(function () {
|
||||
// In case the cache writes above did not generate a broadcast
|
||||
// notification (which would have been intercepted by onWatchUpdated),
|
||||
// likely because the written data were the same as what was already in
|
||||
// the cache, we still want fetchMore to deliver its final loading:false
|
||||
// result with the unchanged data.
|
||||
if (!updatedQuerySet.has(_this.query)) {
|
||||
reobserveCacheFirst(_this);
|
||||
}
|
||||
});
|
||||
};
|
||||
// XXX the subscription variables are separate from the query variables.
|
||||
// if you want to update subscription variables, right now you have to do that separately,
|
||||
// and you can only do it by stopping the subscription and then subscribing again with new variables.
|
||||
/**
|
||||
* A function that enables you to execute a [subscription](https://www.apollographql.com/docs/react/data/subscriptions/), usually to subscribe to specific fields that were included in the query.
|
||||
*
|
||||
* This function returns _another_ function that you can call to terminate the subscription.
|
||||
*/
|
||||
ObservableQuery.prototype.subscribeToMore = function (options) {
|
||||
var _this = this;
|
||||
var subscription = this.queryManager
|
||||
.startGraphQLSubscription({
|
||||
query: options.document,
|
||||
variables: options.variables,
|
||||
context: options.context,
|
||||
})
|
||||
.subscribe({
|
||||
next: function (subscriptionData) {
|
||||
var updateQuery = options.updateQuery;
|
||||
if (updateQuery) {
|
||||
_this.updateQuery(function (previous, _a) {
|
||||
var variables = _a.variables;
|
||||
return updateQuery(previous, {
|
||||
subscriptionData: subscriptionData,
|
||||
variables: variables,
|
||||
});
|
||||
});
|
||||
}
|
||||
},
|
||||
error: function (err) {
|
||||
if (options.onError) {
|
||||
options.onError(err);
|
||||
return;
|
||||
}
|
||||
globalThis.__DEV__ !== false && invariant.error(21, err);
|
||||
},
|
||||
});
|
||||
this.subscriptions.add(subscription);
|
||||
return function () {
|
||||
if (_this.subscriptions.delete(subscription)) {
|
||||
subscription.unsubscribe();
|
||||
}
|
||||
};
|
||||
};
|
||||
ObservableQuery.prototype.setOptions = function (newOptions) {
|
||||
return this.reobserve(newOptions);
|
||||
};
|
||||
ObservableQuery.prototype.silentSetOptions = function (newOptions) {
|
||||
var mergedOptions = compact(this.options, newOptions || {});
|
||||
assign(this.options, mergedOptions);
|
||||
};
|
||||
/**
|
||||
* Update the variables of this observable query, and fetch the new results
|
||||
* if they've changed. Most users should prefer `refetch` instead of
|
||||
* `setVariables` in order to to be properly notified of results even when
|
||||
* they come from the cache.
|
||||
*
|
||||
* Note: the `next` callback will *not* fire if the variables have not changed
|
||||
* or if the result is coming from cache.
|
||||
*
|
||||
* Note: the promise will return the old results immediately if the variables
|
||||
* have not changed.
|
||||
*
|
||||
* Note: the promise will return null immediately if the query is not active
|
||||
* (there are no subscribers).
|
||||
*
|
||||
* @param variables - The new set of variables. If there are missing variables,
|
||||
* the previous values of those variables will be used.
|
||||
*/
|
||||
ObservableQuery.prototype.setVariables = function (variables) {
|
||||
if (equal(this.variables, variables)) {
|
||||
// If we have no observers, then we don't actually want to make a network
|
||||
// request. As soon as someone observes the query, the request will kick
|
||||
// off. For now, we just store any changes. (See #1077)
|
||||
return this.observers.size ? this.result() : Promise.resolve();
|
||||
}
|
||||
this.options.variables = variables;
|
||||
// See comment above
|
||||
if (!this.observers.size) {
|
||||
return Promise.resolve();
|
||||
}
|
||||
return this.reobserve({
|
||||
// Reset options.fetchPolicy to its original value.
|
||||
fetchPolicy: this.options.initialFetchPolicy,
|
||||
variables: variables,
|
||||
}, NetworkStatus.setVariables);
|
||||
};
|
||||
/**
|
||||
* A function that enables you to update the query's cached result without executing a followup GraphQL operation.
|
||||
*
|
||||
* See [using updateQuery and updateFragment](https://www.apollographql.com/docs/react/caching/cache-interaction/#using-updatequery-and-updatefragment) for additional information.
|
||||
*/
|
||||
ObservableQuery.prototype.updateQuery = function (mapFn) {
|
||||
var queryManager = this.queryManager;
|
||||
var result = queryManager.cache.diff({
|
||||
query: this.options.query,
|
||||
variables: this.variables,
|
||||
returnPartialData: true,
|
||||
optimistic: false,
|
||||
}).result;
|
||||
var newResult = mapFn(result, {
|
||||
variables: this.variables,
|
||||
});
|
||||
if (newResult) {
|
||||
queryManager.cache.writeQuery({
|
||||
query: this.options.query,
|
||||
data: newResult,
|
||||
variables: this.variables,
|
||||
});
|
||||
queryManager.broadcastQueries();
|
||||
}
|
||||
};
|
||||
/**
|
||||
* A function that instructs the query to begin re-executing at a specified interval (in milliseconds).
|
||||
*/
|
||||
ObservableQuery.prototype.startPolling = function (pollInterval) {
|
||||
this.options.pollInterval = pollInterval;
|
||||
this.updatePolling();
|
||||
};
|
||||
/**
|
||||
* A function that instructs the query to stop polling after a previous call to `startPolling`.
|
||||
*/
|
||||
ObservableQuery.prototype.stopPolling = function () {
|
||||
this.options.pollInterval = 0;
|
||||
this.updatePolling();
|
||||
};
|
||||
// Update options.fetchPolicy according to options.nextFetchPolicy.
|
||||
ObservableQuery.prototype.applyNextFetchPolicy = function (reason,
|
||||
// It's possible to use this method to apply options.nextFetchPolicy to
|
||||
// options.fetchPolicy even if options !== this.options, though that happens
|
||||
// most often when the options are temporary, used for only one request and
|
||||
// then thrown away, so nextFetchPolicy may not end up mattering.
|
||||
options) {
|
||||
if (options.nextFetchPolicy) {
|
||||
var _a = options.fetchPolicy, fetchPolicy = _a === void 0 ? "cache-first" : _a, _b = options.initialFetchPolicy, initialFetchPolicy = _b === void 0 ? fetchPolicy : _b;
|
||||
if (fetchPolicy === "standby") {
|
||||
// Do nothing, leaving options.fetchPolicy unchanged.
|
||||
}
|
||||
else if (typeof options.nextFetchPolicy === "function") {
|
||||
// When someone chooses "cache-and-network" or "network-only" as their
|
||||
// initial FetchPolicy, they often do not want future cache updates to
|
||||
// trigger unconditional network requests, which is what repeatedly
|
||||
// applying the "cache-and-network" or "network-only" policies would
|
||||
// seem to imply. Instead, when the cache reports an update after the
|
||||
// initial network request, it may be desirable for subsequent network
|
||||
// requests to be triggered only if the cache result is incomplete. To
|
||||
// that end, the options.nextFetchPolicy option provides an easy way to
|
||||
// update options.fetchPolicy after the initial network request, without
|
||||
// having to call observableQuery.setOptions.
|
||||
options.fetchPolicy = options.nextFetchPolicy(fetchPolicy, {
|
||||
reason: reason,
|
||||
options: options,
|
||||
observable: this,
|
||||
initialFetchPolicy: initialFetchPolicy,
|
||||
});
|
||||
}
|
||||
else if (reason === "variables-changed") {
|
||||
options.fetchPolicy = initialFetchPolicy;
|
||||
}
|
||||
else {
|
||||
options.fetchPolicy = options.nextFetchPolicy;
|
||||
}
|
||||
}
|
||||
return options.fetchPolicy;
|
||||
};
|
||||
ObservableQuery.prototype.fetch = function (options, newNetworkStatus, query) {
|
||||
// TODO Make sure we update the networkStatus (and infer fetchVariables)
|
||||
// before actually committing to the fetch.
|
||||
this.queryManager.setObservableQuery(this);
|
||||
return this.queryManager["fetchConcastWithInfo"](this.queryId, options, newNetworkStatus, query);
|
||||
};
|
||||
// Turns polling on or off based on this.options.pollInterval.
|
||||
ObservableQuery.prototype.updatePolling = function () {
|
||||
var _this = this;
|
||||
// Avoid polling in SSR mode
|
||||
if (this.queryManager.ssrMode) {
|
||||
return;
|
||||
}
|
||||
var _a = this, pollingInfo = _a.pollingInfo, pollInterval = _a.options.pollInterval;
|
||||
if (!pollInterval) {
|
||||
if (pollingInfo) {
|
||||
clearTimeout(pollingInfo.timeout);
|
||||
delete this.pollingInfo;
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (pollingInfo && pollingInfo.interval === pollInterval) {
|
||||
return;
|
||||
}
|
||||
invariant(pollInterval, 22);
|
||||
var info = pollingInfo || (this.pollingInfo = {});
|
||||
info.interval = pollInterval;
|
||||
var maybeFetch = function () {
|
||||
var _a, _b;
|
||||
if (_this.pollingInfo) {
|
||||
if (!isNetworkRequestInFlight(_this.queryInfo.networkStatus) &&
|
||||
!((_b = (_a = _this.options).skipPollAttempt) === null || _b === void 0 ? void 0 : _b.call(_a))) {
|
||||
_this.reobserve({
|
||||
// Most fetchPolicy options don't make sense to use in a polling context, as
|
||||
// users wouldn't want to be polling the cache directly. However, network-only and
|
||||
// no-cache are both useful for when the user wants to control whether or not the
|
||||
// polled results are written to the cache.
|
||||
fetchPolicy: _this.options.initialFetchPolicy === "no-cache" ?
|
||||
"no-cache"
|
||||
: "network-only",
|
||||
}, NetworkStatus.poll).then(poll, poll);
|
||||
}
|
||||
else {
|
||||
poll();
|
||||
}
|
||||
}
|
||||
};
|
||||
var poll = function () {
|
||||
var info = _this.pollingInfo;
|
||||
if (info) {
|
||||
clearTimeout(info.timeout);
|
||||
info.timeout = setTimeout(maybeFetch, info.interval);
|
||||
}
|
||||
};
|
||||
poll();
|
||||
};
|
||||
ObservableQuery.prototype.updateLastResult = function (newResult, variables) {
|
||||
if (variables === void 0) { variables = this.variables; }
|
||||
var error = this.getLastError();
|
||||
// Preserve this.last.error unless the variables have changed.
|
||||
if (error && this.last && !equal(variables, this.last.variables)) {
|
||||
error = void 0;
|
||||
}
|
||||
return (this.last = __assign({ result: this.queryManager.assumeImmutableResults ?
|
||||
newResult
|
||||
: cloneDeep(newResult), variables: variables }, (error ? { error: error } : null)));
|
||||
};
|
||||
ObservableQuery.prototype.reobserveAsConcast = function (newOptions, newNetworkStatus) {
|
||||
var _this = this;
|
||||
this.isTornDown = false;
|
||||
var useDisposableConcast =
|
||||
// Refetching uses a disposable Concast to allow refetches using different
|
||||
// options/variables, without permanently altering the options of the
|
||||
// original ObservableQuery.
|
||||
newNetworkStatus === NetworkStatus.refetch ||
|
||||
// The fetchMore method does not actually call the reobserve method, but,
|
||||
// if it did, it would definitely use a disposable Concast.
|
||||
newNetworkStatus === NetworkStatus.fetchMore ||
|
||||
// Polling uses a disposable Concast so the polling options (which force
|
||||
// fetchPolicy to be "network-only" or "no-cache") won't override the original options.
|
||||
newNetworkStatus === NetworkStatus.poll;
|
||||
// Save the old variables, since Object.assign may modify them below.
|
||||
var oldVariables = this.options.variables;
|
||||
var oldFetchPolicy = this.options.fetchPolicy;
|
||||
var mergedOptions = compact(this.options, newOptions || {});
|
||||
var options = useDisposableConcast ?
|
||||
// Disposable Concast fetches receive a shallow copy of this.options
|
||||
// (merged with newOptions), leaving this.options unmodified.
|
||||
mergedOptions
|
||||
: assign(this.options, mergedOptions);
|
||||
// Don't update options.query with the transformed query to avoid
|
||||
// overwriting this.options.query when we aren't using a disposable concast.
|
||||
// We want to ensure we can re-run the custom document transforms the next
|
||||
// time a request is made against the original query.
|
||||
var query = this.transformDocument(options.query);
|
||||
this.lastQuery = query;
|
||||
if (!useDisposableConcast) {
|
||||
// We can skip calling updatePolling if we're not changing this.options.
|
||||
this.updatePolling();
|
||||
// Reset options.fetchPolicy to its original value when variables change,
|
||||
// unless a new fetchPolicy was provided by newOptions.
|
||||
if (newOptions &&
|
||||
newOptions.variables &&
|
||||
!equal(newOptions.variables, oldVariables) &&
|
||||
// Don't mess with the fetchPolicy if it's currently "standby".
|
||||
options.fetchPolicy !== "standby" &&
|
||||
// If we're changing the fetchPolicy anyway, don't try to change it here
|
||||
// using applyNextFetchPolicy. The explicit options.fetchPolicy wins.
|
||||
options.fetchPolicy === oldFetchPolicy) {
|
||||
this.applyNextFetchPolicy("variables-changed", options);
|
||||
if (newNetworkStatus === void 0) {
|
||||
newNetworkStatus = NetworkStatus.setVariables;
|
||||
}
|
||||
}
|
||||
}
|
||||
this.waitForOwnResult && (this.waitForOwnResult = skipCacheDataFor(options.fetchPolicy));
|
||||
var finishWaitingForOwnResult = function () {
|
||||
if (_this.concast === concast) {
|
||||
_this.waitForOwnResult = false;
|
||||
}
|
||||
};
|
||||
var variables = options.variables && __assign({}, options.variables);
|
||||
var _a = this.fetch(options, newNetworkStatus, query), concast = _a.concast, fromLink = _a.fromLink;
|
||||
var observer = {
|
||||
next: function (result) {
|
||||
if (equal(_this.variables, variables)) {
|
||||
finishWaitingForOwnResult();
|
||||
_this.reportResult(result, variables);
|
||||
}
|
||||
},
|
||||
error: function (error) {
|
||||
if (equal(_this.variables, variables)) {
|
||||
finishWaitingForOwnResult();
|
||||
_this.reportError(error, variables);
|
||||
}
|
||||
},
|
||||
};
|
||||
if (!useDisposableConcast && (fromLink || !this.concast)) {
|
||||
// We use the {add,remove}Observer methods directly to avoid wrapping
|
||||
// observer with an unnecessary SubscriptionObserver object.
|
||||
if (this.concast && this.observer) {
|
||||
this.concast.removeObserver(this.observer);
|
||||
}
|
||||
this.concast = concast;
|
||||
this.observer = observer;
|
||||
}
|
||||
concast.addObserver(observer);
|
||||
return concast;
|
||||
};
|
||||
ObservableQuery.prototype.reobserve = function (newOptions, newNetworkStatus) {
|
||||
return this.reobserveAsConcast(newOptions, newNetworkStatus)
|
||||
.promise;
|
||||
};
|
||||
ObservableQuery.prototype.resubscribeAfterError = function () {
|
||||
var args = [];
|
||||
for (var _i = 0; _i < arguments.length; _i++) {
|
||||
args[_i] = arguments[_i];
|
||||
}
|
||||
// If `lastError` is set in the current when the subscription is re-created,
|
||||
// the subscription will immediately receive the error, which will
|
||||
// cause it to terminate again. To avoid this, we first clear
|
||||
// the last error/result from the `observableQuery` before re-starting
|
||||
// the subscription, and restore the last value afterwards so that the
|
||||
// subscription has a chance to stay open.
|
||||
var last = this.last;
|
||||
this.resetLastResults();
|
||||
var subscription = this.subscribe.apply(this, args);
|
||||
this.last = last;
|
||||
return subscription;
|
||||
};
|
||||
// (Re)deliver the current result to this.observers without applying fetch
|
||||
// policies or making network requests.
|
||||
ObservableQuery.prototype.observe = function () {
|
||||
this.reportResult(
|
||||
// Passing false is important so that this.getCurrentResult doesn't
|
||||
// save the fetchMore result as this.lastResult, causing it to be
|
||||
// ignored due to the this.isDifferentFromLastResult check in
|
||||
// this.reportResult.
|
||||
this.getCurrentResult(false), this.variables);
|
||||
};
|
||||
ObservableQuery.prototype.reportResult = function (result, variables) {
|
||||
var lastError = this.getLastError();
|
||||
var isDifferent = this.isDifferentFromLastResult(result, variables);
|
||||
// Update the last result even when isDifferentFromLastResult returns false,
|
||||
// because the query may be using the @nonreactive directive, and we want to
|
||||
// save the the latest version of any nonreactive subtrees (in case
|
||||
// getCurrentResult is called), even though we skip broadcasting changes.
|
||||
if (lastError || !result.partial || this.options.returnPartialData) {
|
||||
this.updateLastResult(result, variables);
|
||||
}
|
||||
if (lastError || isDifferent) {
|
||||
iterateObserversSafely(this.observers, "next", result);
|
||||
}
|
||||
};
|
||||
ObservableQuery.prototype.reportError = function (error, variables) {
|
||||
// Since we don't get the current result on errors, only the error, we
|
||||
// must mirror the updates that occur in QueryStore.markQueryError here
|
||||
var errorResult = __assign(__assign({}, this.getLastResult()), { error: error, errors: error.graphQLErrors, networkStatus: NetworkStatus.error, loading: false });
|
||||
this.updateLastResult(errorResult, variables);
|
||||
iterateObserversSafely(this.observers, "error", (this.last.error = error));
|
||||
};
|
||||
ObservableQuery.prototype.hasObservers = function () {
|
||||
return this.observers.size > 0;
|
||||
};
|
||||
ObservableQuery.prototype.tearDownQuery = function () {
|
||||
if (this.isTornDown)
|
||||
return;
|
||||
if (this.concast && this.observer) {
|
||||
this.concast.removeObserver(this.observer);
|
||||
delete this.concast;
|
||||
delete this.observer;
|
||||
}
|
||||
this.stopPolling();
|
||||
// stop all active GraphQL subscriptions
|
||||
this.subscriptions.forEach(function (sub) { return sub.unsubscribe(); });
|
||||
this.subscriptions.clear();
|
||||
this.queryManager.stopQuery(this.queryId);
|
||||
this.observers.clear();
|
||||
this.isTornDown = true;
|
||||
};
|
||||
ObservableQuery.prototype.transformDocument = function (document) {
|
||||
return this.queryManager.transform(document);
|
||||
};
|
||||
return ObservableQuery;
|
||||
}(Observable));
|
||||
export { ObservableQuery };
|
||||
// Necessary because the ObservableQuery constructor has a different
|
||||
// signature than the Observable constructor.
|
||||
fixObservableSubclass(ObservableQuery);
|
||||
// Reobserve with fetchPolicy effectively set to "cache-first", triggering
|
||||
// delivery of any new data from the cache, possibly falling back to the network
|
||||
// if any cache data are missing. This allows _complete_ cache results to be
|
||||
// delivered without also kicking off unnecessary network requests when
|
||||
// this.options.fetchPolicy is "cache-and-network" or "network-only". When
|
||||
// this.options.fetchPolicy is any other policy ("cache-first", "cache-only",
|
||||
// "standby", or "no-cache"), we call this.reobserve() as usual.
|
||||
export function reobserveCacheFirst(obsQuery) {
|
||||
var _a = obsQuery.options, fetchPolicy = _a.fetchPolicy, nextFetchPolicy = _a.nextFetchPolicy;
|
||||
if (fetchPolicy === "cache-and-network" || fetchPolicy === "network-only") {
|
||||
return obsQuery.reobserve({
|
||||
fetchPolicy: "cache-first",
|
||||
// Use a temporary nextFetchPolicy function that replaces itself with the
|
||||
// previous nextFetchPolicy value and returns the original fetchPolicy.
|
||||
nextFetchPolicy: function (currentFetchPolicy, context) {
|
||||
// Replace this nextFetchPolicy function in the options object with the
|
||||
// original this.options.nextFetchPolicy value.
|
||||
this.nextFetchPolicy = nextFetchPolicy;
|
||||
// If the original nextFetchPolicy value was a function, give it a
|
||||
// chance to decide what happens here.
|
||||
if (typeof this.nextFetchPolicy === "function") {
|
||||
return this.nextFetchPolicy(currentFetchPolicy, context);
|
||||
}
|
||||
// Otherwise go back to the original this.options.fetchPolicy.
|
||||
return fetchPolicy;
|
||||
},
|
||||
});
|
||||
}
|
||||
return obsQuery.reobserve();
|
||||
}
|
||||
function defaultSubscriptionObserverErrorCallback(error) {
|
||||
globalThis.__DEV__ !== false && invariant.error(23, error.message, error.stack);
|
||||
}
|
||||
export function logMissingFieldErrors(missing) {
|
||||
if (globalThis.__DEV__ !== false && missing) {
|
||||
globalThis.__DEV__ !== false && invariant.debug(24, missing);
|
||||
}
|
||||
}
|
||||
function skipCacheDataFor(fetchPolicy /* `undefined` would mean `"cache-first"` */) {
|
||||
return (fetchPolicy === "network-only" ||
|
||||
fetchPolicy === "no-cache" ||
|
||||
fetchPolicy === "standby");
|
||||
}
|
||||
//# sourceMappingURL=ObservableQuery.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/core/ObservableQuery.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/ObservableQuery.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
61
graphql-subscription/node_modules/@apollo/client/core/QueryInfo.d.ts
generated
vendored
Normal file
61
graphql-subscription/node_modules/@apollo/client/core/QueryInfo.d.ts
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
import type { DocumentNode, GraphQLError } from "graphql";
|
||||
import type { Cache } from "../cache/index.js";
|
||||
import type { WatchQueryOptions, ErrorPolicy } from "./watchQueryOptions.js";
|
||||
import type { ObservableQuery } from "./ObservableQuery.js";
|
||||
import type { QueryListener } from "./types.js";
|
||||
import type { FetchResult } from "../link/core/index.js";
|
||||
import { NetworkStatus } from "./networkStatus.js";
|
||||
import type { ApolloError } from "../errors/index.js";
|
||||
import type { QueryManager } from "./QueryManager.js";
|
||||
export type QueryStoreValue = Pick<QueryInfo, "variables" | "networkStatus" | "networkError" | "graphQLErrors">;
|
||||
export declare const enum CacheWriteBehavior {
|
||||
FORBID = 0,
|
||||
OVERWRITE = 1,
|
||||
MERGE = 2
|
||||
}
|
||||
export declare class QueryInfo {
|
||||
readonly queryId: string;
|
||||
listeners: Set<QueryListener>;
|
||||
document: DocumentNode | null;
|
||||
lastRequestId: number;
|
||||
variables?: Record<string, any>;
|
||||
networkStatus?: NetworkStatus;
|
||||
networkError?: Error | null;
|
||||
graphQLErrors?: ReadonlyArray<GraphQLError>;
|
||||
stopped: boolean;
|
||||
private cache;
|
||||
constructor(queryManager: QueryManager<any>, queryId?: string);
|
||||
init(query: {
|
||||
document: DocumentNode;
|
||||
variables: Record<string, any> | undefined;
|
||||
networkStatus?: NetworkStatus;
|
||||
observableQuery?: ObservableQuery<any, any>;
|
||||
lastRequestId?: number;
|
||||
}): this;
|
||||
private dirty;
|
||||
private notifyTimeout?;
|
||||
reset(): void;
|
||||
resetDiff(): void;
|
||||
getDiff(): Cache.DiffResult<any>;
|
||||
private lastDiff?;
|
||||
private updateLastDiff;
|
||||
private getDiffOptions;
|
||||
setDiff(diff: Cache.DiffResult<any> | null): void;
|
||||
readonly observableQuery: ObservableQuery<any, any> | null;
|
||||
private oqListener?;
|
||||
setObservableQuery(oq: ObservableQuery<any, any> | null): void;
|
||||
notify(): void;
|
||||
private shouldNotify;
|
||||
stop(): void;
|
||||
private cancel;
|
||||
private lastWatch?;
|
||||
private updateWatch;
|
||||
private lastWrite?;
|
||||
resetLastWrite(): void;
|
||||
private shouldWrite;
|
||||
markResult<T>(result: FetchResult<T>, document: DocumentNode, options: Pick<WatchQueryOptions, "variables" | "fetchPolicy" | "errorPolicy">, cacheWriteBehavior: CacheWriteBehavior): void;
|
||||
markReady(): NetworkStatus;
|
||||
markError(error: ApolloError): ApolloError;
|
||||
}
|
||||
export declare function shouldWriteResult<T>(result: FetchResult<T>, errorPolicy?: ErrorPolicy): boolean;
|
||||
//# sourceMappingURL=QueryInfo.d.ts.map
|
||||
403
graphql-subscription/node_modules/@apollo/client/core/QueryInfo.js
generated
vendored
Normal file
403
graphql-subscription/node_modules/@apollo/client/core/QueryInfo.js
generated
vendored
Normal file
@@ -0,0 +1,403 @@
|
||||
import { __assign } from "tslib";
|
||||
import { equal } from "@wry/equality";
|
||||
import { DeepMerger } from "../utilities/index.js";
|
||||
import { mergeIncrementalData } from "../utilities/index.js";
|
||||
import { reobserveCacheFirst } from "./ObservableQuery.js";
|
||||
import { isNonEmptyArray, graphQLResultHasError, canUseWeakMap, } from "../utilities/index.js";
|
||||
import { NetworkStatus, isNetworkRequestInFlight } from "./networkStatus.js";
|
||||
var destructiveMethodCounts = new (canUseWeakMap ? WeakMap : Map)();
|
||||
function wrapDestructiveCacheMethod(cache, methodName) {
|
||||
var original = cache[methodName];
|
||||
if (typeof original === "function") {
|
||||
// @ts-expect-error this is just too generic to be typed correctly
|
||||
cache[methodName] = function () {
|
||||
destructiveMethodCounts.set(cache,
|
||||
// The %1e15 allows the count to wrap around to 0 safely every
|
||||
// quadrillion evictions, so there's no risk of overflow. To be
|
||||
// clear, this is more of a pedantic principle than something
|
||||
// that matters in any conceivable practical scenario.
|
||||
(destructiveMethodCounts.get(cache) + 1) % 1e15);
|
||||
// @ts-expect-error this is just too generic to be typed correctly
|
||||
return original.apply(this, arguments);
|
||||
};
|
||||
}
|
||||
}
|
||||
function cancelNotifyTimeout(info) {
|
||||
if (info["notifyTimeout"]) {
|
||||
clearTimeout(info["notifyTimeout"]);
|
||||
info["notifyTimeout"] = void 0;
|
||||
}
|
||||
}
|
||||
// A QueryInfo object represents a single query managed by the
|
||||
// QueryManager, which tracks all QueryInfo objects by queryId in its
|
||||
// this.queries Map. QueryInfo objects store the latest results and errors
|
||||
// for the given query, and are responsible for reporting those results to
|
||||
// the corresponding ObservableQuery, via the QueryInfo.notify method.
|
||||
// Results are reported asynchronously whenever setDiff marks the
|
||||
// QueryInfo object as dirty, though a call to the QueryManager's
|
||||
// broadcastQueries method may trigger the notification before it happens
|
||||
// automatically. This class used to be a simple interface type without
|
||||
// any field privacy or meaningful methods, which is why it still has so
|
||||
// many public fields. The effort to lock down and simplify the QueryInfo
|
||||
// interface is ongoing, and further improvements are welcome.
|
||||
var QueryInfo = /** @class */ (function () {
|
||||
function QueryInfo(queryManager, queryId) {
|
||||
if (queryId === void 0) { queryId = queryManager.generateQueryId(); }
|
||||
this.queryId = queryId;
|
||||
this.listeners = new Set();
|
||||
this.document = null;
|
||||
this.lastRequestId = 1;
|
||||
this.stopped = false;
|
||||
this.dirty = false;
|
||||
this.observableQuery = null;
|
||||
var cache = (this.cache = queryManager.cache);
|
||||
// Track how often cache.evict is called, since we want eviction to
|
||||
// override the feud-stopping logic in the markResult method, by
|
||||
// causing shouldWrite to return true. Wrapping the cache.evict method
|
||||
// is a bit of a hack, but it saves us from having to make eviction
|
||||
// counting an official part of the ApolloCache API.
|
||||
if (!destructiveMethodCounts.has(cache)) {
|
||||
destructiveMethodCounts.set(cache, 0);
|
||||
wrapDestructiveCacheMethod(cache, "evict");
|
||||
wrapDestructiveCacheMethod(cache, "modify");
|
||||
wrapDestructiveCacheMethod(cache, "reset");
|
||||
}
|
||||
}
|
||||
QueryInfo.prototype.init = function (query) {
|
||||
var networkStatus = query.networkStatus || NetworkStatus.loading;
|
||||
if (this.variables &&
|
||||
this.networkStatus !== NetworkStatus.loading &&
|
||||
!equal(this.variables, query.variables)) {
|
||||
networkStatus = NetworkStatus.setVariables;
|
||||
}
|
||||
if (!equal(query.variables, this.variables)) {
|
||||
this.lastDiff = void 0;
|
||||
}
|
||||
Object.assign(this, {
|
||||
document: query.document,
|
||||
variables: query.variables,
|
||||
networkError: null,
|
||||
graphQLErrors: this.graphQLErrors || [],
|
||||
networkStatus: networkStatus,
|
||||
});
|
||||
if (query.observableQuery) {
|
||||
this.setObservableQuery(query.observableQuery);
|
||||
}
|
||||
if (query.lastRequestId) {
|
||||
this.lastRequestId = query.lastRequestId;
|
||||
}
|
||||
return this;
|
||||
};
|
||||
QueryInfo.prototype.reset = function () {
|
||||
cancelNotifyTimeout(this);
|
||||
this.dirty = false;
|
||||
};
|
||||
QueryInfo.prototype.resetDiff = function () {
|
||||
this.lastDiff = void 0;
|
||||
};
|
||||
QueryInfo.prototype.getDiff = function () {
|
||||
var options = this.getDiffOptions();
|
||||
if (this.lastDiff && equal(options, this.lastDiff.options)) {
|
||||
return this.lastDiff.diff;
|
||||
}
|
||||
this.updateWatch(this.variables);
|
||||
var oq = this.observableQuery;
|
||||
if (oq && oq.options.fetchPolicy === "no-cache") {
|
||||
return { complete: false };
|
||||
}
|
||||
var diff = this.cache.diff(options);
|
||||
this.updateLastDiff(diff, options);
|
||||
return diff;
|
||||
};
|
||||
QueryInfo.prototype.updateLastDiff = function (diff, options) {
|
||||
this.lastDiff =
|
||||
diff ?
|
||||
{
|
||||
diff: diff,
|
||||
options: options || this.getDiffOptions(),
|
||||
}
|
||||
: void 0;
|
||||
};
|
||||
QueryInfo.prototype.getDiffOptions = function (variables) {
|
||||
var _a;
|
||||
if (variables === void 0) { variables = this.variables; }
|
||||
return {
|
||||
query: this.document,
|
||||
variables: variables,
|
||||
returnPartialData: true,
|
||||
optimistic: true,
|
||||
canonizeResults: (_a = this.observableQuery) === null || _a === void 0 ? void 0 : _a.options.canonizeResults,
|
||||
};
|
||||
};
|
||||
QueryInfo.prototype.setDiff = function (diff) {
|
||||
var _this = this;
|
||||
var _a;
|
||||
var oldDiff = this.lastDiff && this.lastDiff.diff;
|
||||
// If we do not tolerate partial results, skip this update to prevent it
|
||||
// from being reported. This prevents a situtuation where a query that
|
||||
// errors and another succeeds with overlapping data does not report the
|
||||
// partial data result to the errored query.
|
||||
//
|
||||
// See https://github.com/apollographql/apollo-client/issues/11400 for more
|
||||
// information on this issue.
|
||||
if (diff &&
|
||||
!diff.complete &&
|
||||
!((_a = this.observableQuery) === null || _a === void 0 ? void 0 : _a.options.returnPartialData) &&
|
||||
// In the case of a cache eviction, the diff will become partial so we
|
||||
// schedule a notification to send a network request (this.oqListener) to
|
||||
// go and fetch the missing data.
|
||||
!(oldDiff && oldDiff.complete)) {
|
||||
return;
|
||||
}
|
||||
this.updateLastDiff(diff);
|
||||
if (!this.dirty && !equal(oldDiff && oldDiff.result, diff && diff.result)) {
|
||||
this.dirty = true;
|
||||
if (!this.notifyTimeout) {
|
||||
this.notifyTimeout = setTimeout(function () { return _this.notify(); }, 0);
|
||||
}
|
||||
}
|
||||
};
|
||||
QueryInfo.prototype.setObservableQuery = function (oq) {
|
||||
var _this = this;
|
||||
if (oq === this.observableQuery)
|
||||
return;
|
||||
if (this.oqListener) {
|
||||
this.listeners.delete(this.oqListener);
|
||||
}
|
||||
this.observableQuery = oq;
|
||||
if (oq) {
|
||||
oq["queryInfo"] = this;
|
||||
this.listeners.add((this.oqListener = function () {
|
||||
var diff = _this.getDiff();
|
||||
if (diff.fromOptimisticTransaction) {
|
||||
// If this diff came from an optimistic transaction, deliver the
|
||||
// current cache data to the ObservableQuery, but don't perform a
|
||||
// reobservation, since oq.reobserveCacheFirst might make a network
|
||||
// request, and we never want to trigger network requests in the
|
||||
// middle of optimistic updates.
|
||||
oq["observe"]();
|
||||
}
|
||||
else {
|
||||
// Otherwise, make the ObservableQuery "reobserve" the latest data
|
||||
// using a temporary fetch policy of "cache-first", so complete cache
|
||||
// results have a chance to be delivered without triggering additional
|
||||
// network requests, even when options.fetchPolicy is "network-only"
|
||||
// or "cache-and-network". All other fetch policies are preserved by
|
||||
// this method, and are handled by calling oq.reobserve(). If this
|
||||
// reobservation is spurious, isDifferentFromLastResult still has a
|
||||
// chance to catch it before delivery to ObservableQuery subscribers.
|
||||
reobserveCacheFirst(oq);
|
||||
}
|
||||
}));
|
||||
}
|
||||
else {
|
||||
delete this.oqListener;
|
||||
}
|
||||
};
|
||||
QueryInfo.prototype.notify = function () {
|
||||
var _this = this;
|
||||
cancelNotifyTimeout(this);
|
||||
if (this.shouldNotify()) {
|
||||
this.listeners.forEach(function (listener) { return listener(_this); });
|
||||
}
|
||||
this.dirty = false;
|
||||
};
|
||||
QueryInfo.prototype.shouldNotify = function () {
|
||||
if (!this.dirty || !this.listeners.size) {
|
||||
return false;
|
||||
}
|
||||
if (isNetworkRequestInFlight(this.networkStatus) && this.observableQuery) {
|
||||
var fetchPolicy = this.observableQuery.options.fetchPolicy;
|
||||
if (fetchPolicy !== "cache-only" && fetchPolicy !== "cache-and-network") {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
};
|
||||
QueryInfo.prototype.stop = function () {
|
||||
if (!this.stopped) {
|
||||
this.stopped = true;
|
||||
// Cancel the pending notify timeout
|
||||
this.reset();
|
||||
this.cancel();
|
||||
// Revert back to the no-op version of cancel inherited from
|
||||
// QueryInfo.prototype.
|
||||
this.cancel = QueryInfo.prototype.cancel;
|
||||
var oq = this.observableQuery;
|
||||
if (oq)
|
||||
oq.stopPolling();
|
||||
}
|
||||
};
|
||||
// This method is a no-op by default, until/unless overridden by the
|
||||
// updateWatch method.
|
||||
QueryInfo.prototype.cancel = function () { };
|
||||
QueryInfo.prototype.updateWatch = function (variables) {
|
||||
var _this = this;
|
||||
if (variables === void 0) { variables = this.variables; }
|
||||
var oq = this.observableQuery;
|
||||
if (oq && oq.options.fetchPolicy === "no-cache") {
|
||||
return;
|
||||
}
|
||||
var watchOptions = __assign(__assign({}, this.getDiffOptions(variables)), { watcher: this, callback: function (diff) { return _this.setDiff(diff); } });
|
||||
if (!this.lastWatch || !equal(watchOptions, this.lastWatch)) {
|
||||
this.cancel();
|
||||
this.cancel = this.cache.watch((this.lastWatch = watchOptions));
|
||||
}
|
||||
};
|
||||
QueryInfo.prototype.resetLastWrite = function () {
|
||||
this.lastWrite = void 0;
|
||||
};
|
||||
QueryInfo.prototype.shouldWrite = function (result, variables) {
|
||||
var lastWrite = this.lastWrite;
|
||||
return !(lastWrite &&
|
||||
// If cache.evict has been called since the last time we wrote this
|
||||
// data into the cache, there's a chance writing this result into
|
||||
// the cache will repair what was evicted.
|
||||
lastWrite.dmCount === destructiveMethodCounts.get(this.cache) &&
|
||||
equal(variables, lastWrite.variables) &&
|
||||
equal(result.data, lastWrite.result.data));
|
||||
};
|
||||
QueryInfo.prototype.markResult = function (result, document, options, cacheWriteBehavior) {
|
||||
var _this = this;
|
||||
var merger = new DeepMerger();
|
||||
var graphQLErrors = isNonEmptyArray(result.errors) ? result.errors.slice(0) : [];
|
||||
// Cancel the pending notify timeout (if it exists) to prevent extraneous network
|
||||
// requests. To allow future notify timeouts, diff and dirty are reset as well.
|
||||
this.reset();
|
||||
if ("incremental" in result && isNonEmptyArray(result.incremental)) {
|
||||
var mergedData = mergeIncrementalData(this.getDiff().result, result);
|
||||
result.data = mergedData;
|
||||
// Detect the first chunk of a deferred query and merge it with existing
|
||||
// cache data. This ensures a `cache-first` fetch policy that returns
|
||||
// partial cache data or a `cache-and-network` fetch policy that already
|
||||
// has full data in the cache does not complain when trying to merge the
|
||||
// initial deferred server data with existing cache data.
|
||||
}
|
||||
else if ("hasNext" in result && result.hasNext) {
|
||||
var diff = this.getDiff();
|
||||
result.data = merger.merge(diff.result, result.data);
|
||||
}
|
||||
this.graphQLErrors = graphQLErrors;
|
||||
if (options.fetchPolicy === "no-cache") {
|
||||
this.updateLastDiff({ result: result.data, complete: true }, this.getDiffOptions(options.variables));
|
||||
}
|
||||
else if (cacheWriteBehavior !== 0 /* CacheWriteBehavior.FORBID */) {
|
||||
if (shouldWriteResult(result, options.errorPolicy)) {
|
||||
// Using a transaction here so we have a chance to read the result
|
||||
// back from the cache before the watch callback fires as a result
|
||||
// of writeQuery, so we can store the new diff quietly and ignore
|
||||
// it when we receive it redundantly from the watch callback.
|
||||
this.cache.performTransaction(function (cache) {
|
||||
if (_this.shouldWrite(result, options.variables)) {
|
||||
cache.writeQuery({
|
||||
query: document,
|
||||
data: result.data,
|
||||
variables: options.variables,
|
||||
overwrite: cacheWriteBehavior === 1 /* CacheWriteBehavior.OVERWRITE */,
|
||||
});
|
||||
_this.lastWrite = {
|
||||
result: result,
|
||||
variables: options.variables,
|
||||
dmCount: destructiveMethodCounts.get(_this.cache),
|
||||
};
|
||||
}
|
||||
else {
|
||||
// If result is the same as the last result we received from
|
||||
// the network (and the variables match too), avoid writing
|
||||
// result into the cache again. The wisdom of skipping this
|
||||
// cache write is far from obvious, since any cache write
|
||||
// could be the one that puts the cache back into a desired
|
||||
// state, fixing corruption or missing data. However, if we
|
||||
// always write every network result into the cache, we enable
|
||||
// feuds between queries competing to update the same data in
|
||||
// incompatible ways, which can lead to an endless cycle of
|
||||
// cache broadcasts and useless network requests. As with any
|
||||
// feud, eventually one side must step back from the brink,
|
||||
// letting the other side(s) have the last word(s). There may
|
||||
// be other points where we could break this cycle, such as
|
||||
// silencing the broadcast for cache.writeQuery (not a good
|
||||
// idea, since it just delays the feud a bit) or somehow
|
||||
// avoiding the network request that just happened (also bad,
|
||||
// because the server could return useful new data). All
|
||||
// options considered, skipping this cache write seems to be
|
||||
// the least damaging place to break the cycle, because it
|
||||
// reflects the intuition that we recently wrote this exact
|
||||
// result into the cache, so the cache *should* already/still
|
||||
// contain this data. If some other query has clobbered that
|
||||
// data in the meantime, that's too bad, but there will be no
|
||||
// winners if every query blindly reverts to its own version
|
||||
// of the data. This approach also gives the network a chance
|
||||
// to return new data, which will be written into the cache as
|
||||
// usual, notifying only those queries that are directly
|
||||
// affected by the cache updates, as usual. In the future, an
|
||||
// even more sophisticated cache could perhaps prevent or
|
||||
// mitigate the clobbering somehow, but that would make this
|
||||
// particular cache write even less important, and thus
|
||||
// skipping it would be even safer than it is today.
|
||||
if (_this.lastDiff && _this.lastDiff.diff.complete) {
|
||||
// Reuse data from the last good (complete) diff that we
|
||||
// received, when possible.
|
||||
result.data = _this.lastDiff.diff.result;
|
||||
return;
|
||||
}
|
||||
// If the previous this.diff was incomplete, fall through to
|
||||
// re-reading the latest data with cache.diff, below.
|
||||
}
|
||||
var diffOptions = _this.getDiffOptions(options.variables);
|
||||
var diff = cache.diff(diffOptions);
|
||||
// In case the QueryManager stops this QueryInfo before its
|
||||
// results are delivered, it's important to avoid restarting the
|
||||
// cache watch when markResult is called. We also avoid updating
|
||||
// the watch if we are writing a result that doesn't match the current
|
||||
// variables to avoid race conditions from broadcasting the wrong
|
||||
// result.
|
||||
if (!_this.stopped && equal(_this.variables, options.variables)) {
|
||||
// Any time we're about to update this.diff, we need to make
|
||||
// sure we've started watching the cache.
|
||||
_this.updateWatch(options.variables);
|
||||
}
|
||||
// If we're allowed to write to the cache, and we can read a
|
||||
// complete result from the cache, update result.data to be the
|
||||
// result from the cache, rather than the raw network result.
|
||||
// Set without setDiff to avoid triggering a notify call, since
|
||||
// we have other ways of notifying for this result.
|
||||
_this.updateLastDiff(diff, diffOptions);
|
||||
if (diff.complete) {
|
||||
result.data = diff.result;
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
this.lastWrite = void 0;
|
||||
}
|
||||
}
|
||||
};
|
||||
QueryInfo.prototype.markReady = function () {
|
||||
this.networkError = null;
|
||||
return (this.networkStatus = NetworkStatus.ready);
|
||||
};
|
||||
QueryInfo.prototype.markError = function (error) {
|
||||
this.networkStatus = NetworkStatus.error;
|
||||
this.lastWrite = void 0;
|
||||
this.reset();
|
||||
if (error.graphQLErrors) {
|
||||
this.graphQLErrors = error.graphQLErrors;
|
||||
}
|
||||
if (error.networkError) {
|
||||
this.networkError = error.networkError;
|
||||
}
|
||||
return error;
|
||||
};
|
||||
return QueryInfo;
|
||||
}());
|
||||
export { QueryInfo };
|
||||
export function shouldWriteResult(result, errorPolicy) {
|
||||
if (errorPolicy === void 0) { errorPolicy = "none"; }
|
||||
var ignoreErrors = errorPolicy === "ignore" || errorPolicy === "all";
|
||||
var writeWithErrors = !graphQLResultHasError(result);
|
||||
if (!writeWithErrors && ignoreErrors && result.data) {
|
||||
writeWithErrors = true;
|
||||
}
|
||||
return writeWithErrors;
|
||||
}
|
||||
//# sourceMappingURL=QueryInfo.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/core/QueryInfo.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/QueryInfo.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
132
graphql-subscription/node_modules/@apollo/client/core/QueryManager.d.ts
generated
vendored
Normal file
132
graphql-subscription/node_modules/@apollo/client/core/QueryManager.d.ts
generated
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
import type { DocumentNode } from "graphql";
|
||||
import type { ApolloLink, FetchResult } from "../link/core/index.js";
|
||||
import type { Cache, ApolloCache } from "../cache/index.js";
|
||||
import { Observable, DocumentTransform } from "../utilities/index.js";
|
||||
import type { QueryOptions, WatchQueryOptions, SubscriptionOptions, MutationOptions, ErrorPolicy, MutationFetchPolicy } from "./watchQueryOptions.js";
|
||||
import { ObservableQuery } from "./ObservableQuery.js";
|
||||
import { NetworkStatus } from "./networkStatus.js";
|
||||
import type { ApolloQueryResult, OperationVariables, MutationUpdaterFunction, OnQueryUpdated, InternalRefetchQueriesInclude, InternalRefetchQueriesOptions, InternalRefetchQueriesMap, DefaultContext } from "./types.js";
|
||||
import { LocalState } from "./LocalState.js";
|
||||
import type { QueryStoreValue } from "./QueryInfo.js";
|
||||
interface MutationStoreValue {
|
||||
mutation: DocumentNode;
|
||||
variables: Record<string, any>;
|
||||
loading: boolean;
|
||||
error: Error | null;
|
||||
}
|
||||
type UpdateQueries<TData> = MutationOptions<TData, any, any>["updateQueries"];
|
||||
interface TransformCacheEntry {
|
||||
hasClientExports: boolean;
|
||||
hasForcedResolvers: boolean;
|
||||
hasNonreactiveDirective: boolean;
|
||||
clientQuery: DocumentNode | null;
|
||||
serverQuery: DocumentNode | null;
|
||||
defaultVars: OperationVariables;
|
||||
asQuery: DocumentNode;
|
||||
}
|
||||
import type { DefaultOptions } from "./ApolloClient.js";
|
||||
import { Trie } from "@wry/trie";
|
||||
export declare class QueryManager<TStore> {
|
||||
cache: ApolloCache<TStore>;
|
||||
link: ApolloLink;
|
||||
defaultOptions: DefaultOptions;
|
||||
readonly assumeImmutableResults: boolean;
|
||||
readonly documentTransform: DocumentTransform;
|
||||
readonly ssrMode: boolean;
|
||||
readonly defaultContext: Partial<DefaultContext>;
|
||||
private queryDeduplication;
|
||||
private clientAwareness;
|
||||
private localState;
|
||||
private onBroadcast?;
|
||||
mutationStore?: {
|
||||
[mutationId: string]: MutationStoreValue;
|
||||
};
|
||||
private queries;
|
||||
protected fetchCancelFns: Map<string, (error: any) => any>;
|
||||
constructor({ cache, link, defaultOptions, documentTransform, queryDeduplication, onBroadcast, ssrMode, clientAwareness, localState, assumeImmutableResults, defaultContext, }: {
|
||||
cache: ApolloCache<TStore>;
|
||||
link: ApolloLink;
|
||||
defaultOptions?: DefaultOptions;
|
||||
documentTransform?: DocumentTransform;
|
||||
queryDeduplication?: boolean;
|
||||
onBroadcast?: () => void;
|
||||
ssrMode?: boolean;
|
||||
clientAwareness?: Record<string, string>;
|
||||
localState?: LocalState<TStore>;
|
||||
assumeImmutableResults?: boolean;
|
||||
defaultContext?: Partial<DefaultContext>;
|
||||
});
|
||||
/**
|
||||
* Call this method to terminate any active query processes, making it safe
|
||||
* to dispose of this QueryManager instance.
|
||||
*/
|
||||
stop(): void;
|
||||
private cancelPendingFetches;
|
||||
mutate<TData, TVariables extends OperationVariables, TContext extends Record<string, any>, TCache extends ApolloCache<any>>({ mutation, variables, optimisticResponse, updateQueries, refetchQueries, awaitRefetchQueries, update: updateWithProxyFn, onQueryUpdated, fetchPolicy, errorPolicy, keepRootFields, context, }: MutationOptions<TData, TVariables, TContext>): Promise<FetchResult<TData>>;
|
||||
markMutationResult<TData, TVariables, TContext, TCache extends ApolloCache<any>>(mutation: {
|
||||
mutationId: string;
|
||||
result: FetchResult<TData>;
|
||||
document: DocumentNode;
|
||||
variables?: TVariables;
|
||||
fetchPolicy?: MutationFetchPolicy;
|
||||
errorPolicy: ErrorPolicy;
|
||||
context?: TContext;
|
||||
updateQueries: UpdateQueries<TData>;
|
||||
update?: MutationUpdaterFunction<TData, TVariables, TContext, TCache>;
|
||||
awaitRefetchQueries?: boolean;
|
||||
refetchQueries?: InternalRefetchQueriesInclude;
|
||||
removeOptimistic?: string;
|
||||
onQueryUpdated?: OnQueryUpdated<any>;
|
||||
keepRootFields?: boolean;
|
||||
}, cache?: ApolloCache<TStore>): Promise<FetchResult<TData>>;
|
||||
markMutationOptimistic<TData, TVariables, TContext, TCache extends ApolloCache<any>>(optimisticResponse: any, mutation: {
|
||||
mutationId: string;
|
||||
document: DocumentNode;
|
||||
variables?: TVariables;
|
||||
fetchPolicy?: MutationFetchPolicy;
|
||||
errorPolicy: ErrorPolicy;
|
||||
context?: TContext;
|
||||
updateQueries: UpdateQueries<TData>;
|
||||
update?: MutationUpdaterFunction<TData, TVariables, TContext, TCache>;
|
||||
keepRootFields?: boolean;
|
||||
}): boolean;
|
||||
fetchQuery<TData, TVars extends OperationVariables>(queryId: string, options: WatchQueryOptions<TVars, TData>, networkStatus?: NetworkStatus): Promise<ApolloQueryResult<TData>>;
|
||||
getQueryStore(): Record<string, QueryStoreValue>;
|
||||
resetErrors(queryId: string): void;
|
||||
transform(document: DocumentNode): DocumentNode;
|
||||
private transformCache;
|
||||
getDocumentInfo(document: DocumentNode): TransformCacheEntry;
|
||||
private getVariables;
|
||||
watchQuery<T, TVariables extends OperationVariables = OperationVariables>(options: WatchQueryOptions<TVariables, T>): ObservableQuery<T, TVariables>;
|
||||
query<TData, TVars extends OperationVariables = OperationVariables>(options: QueryOptions<TVars, TData>, queryId?: string): Promise<ApolloQueryResult<TData>>;
|
||||
private queryIdCounter;
|
||||
generateQueryId(): string;
|
||||
private requestIdCounter;
|
||||
generateRequestId(): number;
|
||||
private mutationIdCounter;
|
||||
generateMutationId(): string;
|
||||
stopQueryInStore(queryId: string): void;
|
||||
private stopQueryInStoreNoBroadcast;
|
||||
clearStore(options?: Cache.ResetOptions): Promise<void>;
|
||||
getObservableQueries(include?: InternalRefetchQueriesInclude): Map<string, ObservableQuery<any, OperationVariables>>;
|
||||
reFetchObservableQueries(includeStandby?: boolean): Promise<ApolloQueryResult<any>[]>;
|
||||
setObservableQuery(observableQuery: ObservableQuery<any, any>): void;
|
||||
startGraphQLSubscription<T = any>({ query, fetchPolicy, errorPolicy, variables, context, }: SubscriptionOptions): Observable<FetchResult<T>>;
|
||||
stopQuery(queryId: string): void;
|
||||
private stopQueryNoBroadcast;
|
||||
removeQuery(queryId: string): void;
|
||||
broadcastQueries(): void;
|
||||
getLocalState(): LocalState<TStore>;
|
||||
protected inFlightLinkObservables: Trie<{
|
||||
observable?: Observable<FetchResult<any>> | undefined;
|
||||
}>;
|
||||
private getObservableFromLink;
|
||||
private getResultsFromLink;
|
||||
private fetchConcastWithInfo;
|
||||
refetchQueries<TResult>({ updateCache, include, optimistic, removeOptimistic, onQueryUpdated, }: InternalRefetchQueriesOptions<ApolloCache<TStore>, TResult>): InternalRefetchQueriesMap<TResult>;
|
||||
private fetchQueryByPolicy;
|
||||
private getQuery;
|
||||
private prepareContext;
|
||||
}
|
||||
export {};
|
||||
//# sourceMappingURL=QueryManager.d.ts.map
|
||||
1123
graphql-subscription/node_modules/@apollo/client/core/QueryManager.js
generated
vendored
Normal file
1123
graphql-subscription/node_modules/@apollo/client/core/QueryManager.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1
graphql-subscription/node_modules/@apollo/client/core/QueryManager.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/QueryManager.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
2642
graphql-subscription/node_modules/@apollo/client/core/core.cjs
generated
vendored
Normal file
2642
graphql-subscription/node_modules/@apollo/client/core/core.cjs
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1
graphql-subscription/node_modules/@apollo/client/core/core.cjs.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/core.cjs.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
2642
graphql-subscription/node_modules/@apollo/client/core/core.cjs.native.js
generated
vendored
Normal file
2642
graphql-subscription/node_modules/@apollo/client/core/core.cjs.native.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
4
graphql-subscription/node_modules/@apollo/client/core/equalByQuery.d.ts
generated
vendored
Normal file
4
graphql-subscription/node_modules/@apollo/client/core/equalByQuery.d.ts
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
import type { DocumentNode } from "graphql";
|
||||
import type { ApolloQueryResult, OperationVariables } from "./types.js";
|
||||
export declare function equalByQuery(query: DocumentNode, { data: aData, ...aRest }: Partial<ApolloQueryResult<unknown>>, { data: bData, ...bRest }: Partial<ApolloQueryResult<unknown>>, variables?: OperationVariables): boolean;
|
||||
//# sourceMappingURL=equalByQuery.d.ts.map
|
||||
87
graphql-subscription/node_modules/@apollo/client/core/equalByQuery.js
generated
vendored
Normal file
87
graphql-subscription/node_modules/@apollo/client/core/equalByQuery.js
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
import { __rest } from "tslib";
|
||||
import equal from "@wry/equality";
|
||||
import { createFragmentMap, getFragmentDefinitions, getFragmentFromSelection, getMainDefinition, isField, resultKeyNameFromField, shouldInclude, } from "../utilities/index.js";
|
||||
// Returns true if aResult and bResult are deeply equal according to the fields
|
||||
// selected by the given query, ignoring any fields marked as @nonreactive.
|
||||
export function equalByQuery(query, _a, _b, variables) {
|
||||
var aData = _a.data, aRest = __rest(_a, ["data"]);
|
||||
var bData = _b.data, bRest = __rest(_b, ["data"]);
|
||||
return (equal(aRest, bRest) &&
|
||||
equalBySelectionSet(getMainDefinition(query).selectionSet, aData, bData, {
|
||||
fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
|
||||
variables: variables,
|
||||
}));
|
||||
}
|
||||
function equalBySelectionSet(selectionSet, aResult, bResult, context) {
|
||||
if (aResult === bResult) {
|
||||
return true;
|
||||
}
|
||||
var seenSelections = new Set();
|
||||
// Returning true from this Array.prototype.every callback function skips the
|
||||
// current field/subtree. Returning false aborts the entire traversal
|
||||
// immediately, causing equalBySelectionSet to return false.
|
||||
return selectionSet.selections.every(function (selection) {
|
||||
// Avoid re-processing the same selection at the same level of recursion, in
|
||||
// case the same field gets included via multiple indirect fragment spreads.
|
||||
if (seenSelections.has(selection))
|
||||
return true;
|
||||
seenSelections.add(selection);
|
||||
// Ignore @skip(if: true) and @include(if: false) fields.
|
||||
if (!shouldInclude(selection, context.variables))
|
||||
return true;
|
||||
// If the field or (named) fragment spread has a @nonreactive directive on
|
||||
// it, we don't care if it's different, so we pretend it's the same.
|
||||
if (selectionHasNonreactiveDirective(selection))
|
||||
return true;
|
||||
if (isField(selection)) {
|
||||
var resultKey = resultKeyNameFromField(selection);
|
||||
var aResultChild = aResult && aResult[resultKey];
|
||||
var bResultChild = bResult && bResult[resultKey];
|
||||
var childSelectionSet = selection.selectionSet;
|
||||
if (!childSelectionSet) {
|
||||
// These are scalar values, so we can compare them with deep equal
|
||||
// without redoing the main recursive work.
|
||||
return equal(aResultChild, bResultChild);
|
||||
}
|
||||
var aChildIsArray = Array.isArray(aResultChild);
|
||||
var bChildIsArray = Array.isArray(bResultChild);
|
||||
if (aChildIsArray !== bChildIsArray)
|
||||
return false;
|
||||
if (aChildIsArray && bChildIsArray) {
|
||||
var length_1 = aResultChild.length;
|
||||
if (bResultChild.length !== length_1) {
|
||||
return false;
|
||||
}
|
||||
for (var i = 0; i < length_1; ++i) {
|
||||
if (!equalBySelectionSet(childSelectionSet, aResultChild[i], bResultChild[i], context)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return equalBySelectionSet(childSelectionSet, aResultChild, bResultChild, context);
|
||||
}
|
||||
else {
|
||||
var fragment = getFragmentFromSelection(selection, context.fragmentMap);
|
||||
if (fragment) {
|
||||
// The fragment might === selection if it's an inline fragment, but
|
||||
// could be !== if it's a named fragment ...spread.
|
||||
if (selectionHasNonreactiveDirective(fragment))
|
||||
return true;
|
||||
return equalBySelectionSet(fragment.selectionSet,
|
||||
// Notice that we reuse the same aResult and bResult values here,
|
||||
// since the fragment ...spread does not specify a field name, but
|
||||
// consists of multiple fields (within the fragment's selection set)
|
||||
// that should be applied to the current result value(s).
|
||||
aResult, bResult, context);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
function selectionHasNonreactiveDirective(selection) {
|
||||
return (!!selection.directives && selection.directives.some(directiveIsNonreactive));
|
||||
}
|
||||
function directiveIsNonreactive(dir) {
|
||||
return dir.name.value === "nonreactive";
|
||||
}
|
||||
//# sourceMappingURL=equalByQuery.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/core/equalByQuery.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/equalByQuery.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
22
graphql-subscription/node_modules/@apollo/client/core/index.d.ts
generated
vendored
Normal file
22
graphql-subscription/node_modules/@apollo/client/core/index.d.ts
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
export type { ApolloClientOptions, DefaultOptions } from "./ApolloClient.js";
|
||||
export { ApolloClient, mergeOptions } from "./ApolloClient.js";
|
||||
export type { FetchMoreOptions, UpdateQueryOptions, } from "./ObservableQuery.js";
|
||||
export { ObservableQuery } from "./ObservableQuery.js";
|
||||
export type { QueryOptions, WatchQueryOptions, MutationOptions, SubscriptionOptions, FetchPolicy, WatchQueryFetchPolicy, MutationFetchPolicy, RefetchWritePolicy, ErrorPolicy, FetchMoreQueryOptions, SubscribeToMoreOptions, } from "./watchQueryOptions.js";
|
||||
export { NetworkStatus, isNetworkRequestSettled } from "./networkStatus.js";
|
||||
export * from "./types.js";
|
||||
export type { Resolver, FragmentMatcher } from "./LocalState.js";
|
||||
export { isApolloError, ApolloError } from "../errors/index.js";
|
||||
export type { Transaction, DataProxy, InMemoryCacheConfig, ReactiveVar, TypePolicies, TypePolicy, FieldPolicy, FieldReadFunction, FieldMergeFunction, FieldFunctionOptions, PossibleTypesMap, } from "../cache/index.js";
|
||||
export { Cache, ApolloCache, InMemoryCache, MissingFieldError, defaultDataIdFromObject, makeVar, } from "../cache/index.js";
|
||||
export * from "../cache/inmemory/types.js";
|
||||
export * from "../link/core/index.js";
|
||||
export * from "../link/http/index.js";
|
||||
export type { ServerError } from "../link/utils/index.js";
|
||||
export { fromError, toPromise, fromPromise, throwServerError, } from "../link/utils/index.js";
|
||||
export type { DocumentTransformCacheKey, Observer, ObservableSubscription, Reference, StoreObject, } from "../utilities/index.js";
|
||||
export { DocumentTransform, Observable, isReference, makeReference, } from "../utilities/index.js";
|
||||
import { setVerbosity } from "ts-invariant";
|
||||
export { setVerbosity as setLogVerbosity };
|
||||
export { gql, resetCaches, disableFragmentWarnings, enableExperimentalFragmentVariables, disableExperimentalFragmentVariables, } from "graphql-tag";
|
||||
//# sourceMappingURL=index.d.ts.map
|
||||
35
graphql-subscription/node_modules/@apollo/client/core/index.js
generated
vendored
Normal file
35
graphql-subscription/node_modules/@apollo/client/core/index.js
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
/* Core */
|
||||
export { ApolloClient, mergeOptions } from "./ApolloClient.js";
|
||||
export { ObservableQuery } from "./ObservableQuery.js";
|
||||
export { NetworkStatus, isNetworkRequestSettled } from "./networkStatus.js";
|
||||
export * from "./types.js";
|
||||
export { isApolloError, ApolloError } from "../errors/index.js";
|
||||
export { Cache, ApolloCache, InMemoryCache, MissingFieldError, defaultDataIdFromObject, makeVar, } from "../cache/index.js";
|
||||
export * from "../cache/inmemory/types.js";
|
||||
/* Link */
|
||||
export * from "../link/core/index.js";
|
||||
export * from "../link/http/index.js";
|
||||
export { fromError, toPromise, fromPromise, throwServerError, } from "../link/utils/index.js";
|
||||
export { DocumentTransform, Observable, isReference, makeReference, } from "../utilities/index.js";
|
||||
/* Supporting */
|
||||
// The verbosity of invariant.{log,warn,error} can be controlled globally
|
||||
// (for anyone using the same ts-invariant package) by passing "log",
|
||||
// "warn", "error", or "silent" to setVerbosity ("log" is the default).
|
||||
// Note that all invariant.* logging is hidden in production.
|
||||
import { setVerbosity } from "ts-invariant";
|
||||
export { setVerbosity as setLogVerbosity };
|
||||
setVerbosity(globalThis.__DEV__ !== false ? "log" : "silent");
|
||||
// Note that importing `gql` by itself, then destructuring
|
||||
// additional properties separately before exporting, is intentional.
|
||||
// Due to the way the `graphql-tag` library is setup, certain bundlers
|
||||
// can't find the properties added to the exported `gql` function without
|
||||
// additional guidance (e.g. Rollup - see
|
||||
// https://rollupjs.org/guide/en/#error-name-is-not-exported-by-module).
|
||||
// Instead of having people that are using bundlers with `@apollo/client` add
|
||||
// extra bundler config to help `graphql-tag` exports be found (which would be
|
||||
// awkward since they aren't importing `graphql-tag` themselves), this
|
||||
// workaround of pulling the extra properties off the `gql` function,
|
||||
// then re-exporting them separately, helps keeps bundlers happy without any
|
||||
// additional config changes.
|
||||
export { gql, resetCaches, disableFragmentWarnings, enableExperimentalFragmentVariables, disableExperimentalFragmentVariables, } from "graphql-tag";
|
||||
//# sourceMappingURL=index.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/core/index.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/index.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/core/index.ts"],"names":[],"mappings":"AAAA,UAAU;AAGV,OAAO,EAAE,YAAY,EAAE,YAAY,EAAE,MAAM,mBAAmB,CAAC;AAK/D,OAAO,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAC;AAcvD,OAAO,EAAE,aAAa,EAAE,uBAAuB,EAAE,MAAM,oBAAoB,CAAC;AAC5E,cAAc,YAAY,CAAC;AAE3B,OAAO,EAAE,aAAa,EAAE,WAAW,EAAE,MAAM,oBAAoB,CAAC;AAkBhE,OAAO,EACL,KAAK,EACL,WAAW,EACX,aAAa,EACb,iBAAiB,EACjB,uBAAuB,EACvB,OAAO,GACR,MAAM,mBAAmB,CAAC;AAE3B,cAAc,4BAA4B,CAAC;AAE3C,UAAU;AAEV,cAAc,uBAAuB,CAAC;AACtC,cAAc,uBAAuB,CAAC;AAEtC,OAAO,EACL,SAAS,EACT,SAAS,EACT,WAAW,EACX,gBAAgB,GACjB,MAAM,wBAAwB,CAAC;AAWhC,OAAO,EACL,iBAAiB,EACjB,UAAU,EACV,WAAW,EACX,aAAa,GACd,MAAM,uBAAuB,CAAC;AAE/B,gBAAgB;AAEhB,yEAAyE;AACzE,qEAAqE;AACrE,uEAAuE;AACvE,6DAA6D;AAC7D,OAAO,EAAE,YAAY,EAAE,MAAM,cAAc,CAAC;AAC5C,OAAO,EAAE,YAAY,IAAI,eAAe,EAAE,CAAC;AAC3C,YAAY,CAAC,OAAO,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC;AAEzC,0DAA0D;AAC1D,qEAAqE;AACrE,sEAAsE;AACtE,yEAAyE;AACzE,yCAAyC;AACzC,wEAAwE;AACxE,6EAA6E;AAC7E,8EAA8E;AAC9E,sEAAsE;AACtE,qEAAqE;AACrE,4EAA4E;AAC5E,6BAA6B;AAC7B,OAAO,EACL,GAAG,EACH,WAAW,EACX,uBAAuB,EACvB,mCAAmC,EACnC,oCAAoC,GACrC,MAAM,aAAa,CAAC","sourcesContent":["/* Core */\n\nexport type { ApolloClientOptions, DefaultOptions } from \"./ApolloClient.js\";\nexport { ApolloClient, mergeOptions } from \"./ApolloClient.js\";\nexport type {\n FetchMoreOptions,\n UpdateQueryOptions,\n} from \"./ObservableQuery.js\";\nexport { ObservableQuery } from \"./ObservableQuery.js\";\nexport type {\n QueryOptions,\n WatchQueryOptions,\n MutationOptions,\n SubscriptionOptions,\n FetchPolicy,\n WatchQueryFetchPolicy,\n MutationFetchPolicy,\n RefetchWritePolicy,\n ErrorPolicy,\n FetchMoreQueryOptions,\n SubscribeToMoreOptions,\n} from \"./watchQueryOptions.js\";\nexport { NetworkStatus, isNetworkRequestSettled } from \"./networkStatus.js\";\nexport * from \"./types.js\";\nexport type { Resolver, FragmentMatcher } from \"./LocalState.js\";\nexport { isApolloError, ApolloError } from 
\"../errors/index.js\";\n/* Cache */\n\nexport type {\n // All the exports (types) from ../cache, minus cacheSlot,\n // which we want to keep semi-private.\n Transaction,\n DataProxy,\n InMemoryCacheConfig,\n ReactiveVar,\n TypePolicies,\n TypePolicy,\n FieldPolicy,\n FieldReadFunction,\n FieldMergeFunction,\n FieldFunctionOptions,\n PossibleTypesMap,\n} from \"../cache/index.js\";\nexport {\n Cache,\n ApolloCache,\n InMemoryCache,\n MissingFieldError,\n defaultDataIdFromObject,\n makeVar,\n} from \"../cache/index.js\";\n\nexport * from \"../cache/inmemory/types.js\";\n\n/* Link */\n\nexport * from \"../link/core/index.js\";\nexport * from \"../link/http/index.js\";\nexport type { ServerError } from \"../link/utils/index.js\";\nexport {\n fromError,\n toPromise,\n fromPromise,\n throwServerError,\n} from \"../link/utils/index.js\";\n\n/* Utilities */\n\nexport type {\n DocumentTransformCacheKey,\n Observer,\n ObservableSubscription,\n Reference,\n StoreObject,\n} from \"../utilities/index.js\";\nexport {\n DocumentTransform,\n Observable,\n isReference,\n makeReference,\n} from \"../utilities/index.js\";\n\n/* Supporting */\n\n// The verbosity of invariant.{log,warn,error} can be controlled globally\n// (for anyone using the same ts-invariant package) by passing \"log\",\n// \"warn\", \"error\", or \"silent\" to setVerbosity (\"log\" is the default).\n// Note that all invariant.* logging is hidden in production.\nimport { setVerbosity } from \"ts-invariant\";\nexport { setVerbosity as setLogVerbosity };\nsetVerbosity(__DEV__ ? \"log\" : \"silent\");\n\n// Note that importing `gql` by itself, then destructuring\n// additional properties separately before exporting, is intentional.\n// Due to the way the `graphql-tag` library is setup, certain bundlers\n// can't find the properties added to the exported `gql` function without\n// additional guidance (e.g. 
Rollup - see\n// https://rollupjs.org/guide/en/#error-name-is-not-exported-by-module).\n// Instead of having people that are using bundlers with `@apollo/client` add\n// extra bundler config to help `graphql-tag` exports be found (which would be\n// awkward since they aren't importing `graphql-tag` themselves), this\n// workaround of pulling the extra properties off the `gql` function,\n// then re-exporting them separately, helps keeps bundlers happy without any\n// additional config changes.\nexport {\n gql,\n resetCaches,\n disableFragmentWarnings,\n enableExperimentalFragmentVariables,\n disableExperimentalFragmentVariables,\n} from \"graphql-tag\";\n"]}
|
||||
51
graphql-subscription/node_modules/@apollo/client/core/networkStatus.d.ts
generated
vendored
Normal file
51
graphql-subscription/node_modules/@apollo/client/core/networkStatus.d.ts
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
/**
 * The current status of a query’s execution in our system.
 */
export declare enum NetworkStatus {
    /**
     * The query has never been run before and the query is now currently running. A query will still
     * have this network status even if a partial data result was returned from the cache, but a
     * query was dispatched anyway.
     */
    loading = 1,
    /**
     * If `setVariables` was called and a query was fired because of that then the network status
     * will be `setVariables` until the result of that query comes back.
     */
    setVariables = 2,
    /**
     * Indicates that `fetchMore` was called on this query and that the query created is currently in
     * flight.
     */
    fetchMore = 3,
    /**
     * Similar to the `setVariables` network status. It means that `refetch` was called on a query
     * and the refetch request is currently in flight.
     */
    refetch = 4,
    // NOTE: the value 5 is unused — the numbering jumps from `refetch = 4` to `poll = 6`.
    /**
     * Indicates that a polling query is currently in flight. So for example if you are polling a
     * query every 10 seconds then the network status will switch to `poll` every 10 seconds whenever
     * a poll request has been sent but not resolved.
     */
    poll = 6,
    /**
     * No request is in flight for this query, and no errors happened. Everything is OK.
     */
    ready = 7,
    /**
     * No request is in flight for this query, but one or more errors were detected.
     */
    error = 8
}
/**
 * Returns true if there is currently a network request in flight according to a given network
 * status.
 */
export declare function isNetworkRequestInFlight(networkStatus?: NetworkStatus): boolean;
/**
 * Returns true if the network request is in ready or error state according to a given network
 * status.
 */
export declare function isNetworkRequestSettled(networkStatus?: NetworkStatus): boolean;
|
||||
//# sourceMappingURL=networkStatus.d.ts.map
|
||||
56
graphql-subscription/node_modules/@apollo/client/core/networkStatus.js
generated
vendored
Normal file
56
graphql-subscription/node_modules/@apollo/client/core/networkStatus.js
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
/**
 * The current status of a query’s execution in our system.
 */
export var NetworkStatus;
(function (NetworkStatus) {
    // Register one enum member: the forward mapping (name -> value) and the
    // reverse mapping (value -> name), exactly as TypeScript's numeric-enum
    // emit produces.
    function member(name, value) {
        NetworkStatus[name] = value;
        NetworkStatus[value] = name;
    }
    /**
     * The query has never been run before and is now currently running. A query
     * keeps this status even when a partial data result was returned from the
     * cache, as long as a network request was dispatched anyway.
     */
    member("loading", 1);
    /**
     * `setVariables` was called and a query was fired because of that; the
     * status stays `setVariables` until the result of that query comes back.
     */
    member("setVariables", 2);
    /**
     * `fetchMore` was called on this query and the query it created is
     * currently in flight.
     */
    member("fetchMore", 3);
    /**
     * Similar to `setVariables`: `refetch` was called on a query and the
     * refetch request is currently in flight.
     */
    member("refetch", 4);
    // NOTE: the value 5 is unused — numbering skips from 4 to 6.
    /**
     * A polling query is currently in flight. For example, when polling a
     * query every 10 seconds, the status switches to `poll` every 10 seconds
     * whenever a poll request has been sent but not yet resolved.
     */
    member("poll", 6);
    /**
     * No request is in flight for this query, and no errors happened.
     * Everything is OK.
     */
    member("ready", 7);
    /**
     * No request is in flight for this query, but one or more errors were
     * detected.
     */
    member("error", 8);
})(NetworkStatus || (NetworkStatus = {}));
|
||||
/**
 * Returns true if there is currently a network request in flight according to
 * a given network status.
 */
export function isNetworkRequestInFlight(networkStatus) {
    // A missing (undefined) status means nothing is in flight; otherwise every
    // status below 7 (`ready`) represents a request still on the wire.
    if (!networkStatus) {
        return false;
    }
    return networkStatus < 7;
}
|
||||
/**
 * Returns true if the network request is in ready or error state according to
 * a given network status.
 */
export function isNetworkRequestSettled(networkStatus) {
    // Only 7 (`ready`) and 8 (`error`) count as settled; anything else —
    // including an undefined status — does not.
    switch (networkStatus) {
        case 7:
        case 8:
            return true;
        default:
            return false;
    }
}
|
||||
//# sourceMappingURL=networkStatus.js.map
|
||||
1
graphql-subscription/node_modules/@apollo/client/core/networkStatus.js.map
generated
vendored
Normal file
1
graphql-subscription/node_modules/@apollo/client/core/networkStatus.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"networkStatus.js","sourceRoot":"","sources":["../../src/core/networkStatus.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,CAAN,IAAY,aA0CX;AA1CD,WAAY,aAAa;IACvB;;;;OAIG;IACH,uDAAW,CAAA;IAEX;;;OAGG;IACH,iEAAgB,CAAA;IAEhB;;;OAGG;IACH,2DAAa,CAAA;IAEb;;;OAGG;IACH,uDAAW,CAAA;IAEX;;;;OAIG;IACH,iDAAQ,CAAA;IAER;;OAEG;IACH,mDAAS,CAAA;IAET;;OAEG;IACH,mDAAS,CAAA;AACX,CAAC,EA1CW,aAAa,KAAb,aAAa,QA0CxB;AAED;;;GAGG;AACH,MAAM,UAAU,wBAAwB,CACtC,aAA6B;IAE7B,OAAO,aAAa,CAAC,CAAC,CAAC,aAAa,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC;AACnD,CAAC;AAED;;;GAGG;AACH,MAAM,UAAU,uBAAuB,CACrC,aAA6B;IAE7B,OAAO,aAAa,KAAK,CAAC,IAAI,aAAa,KAAK,CAAC,CAAC;AACpD,CAAC","sourcesContent":["/**\n * The current status of a query’s execution in our system.\n */\nexport enum NetworkStatus {\n /**\n * The query has never been run before and the query is now currently running. A query will still\n * have this network status even if a partial data result was returned from the cache, but a\n * query was dispatched anyway.\n */\n loading = 1,\n\n /**\n * If `setVariables` was called and a query was fired because of that then the network status\n * will be `setVariables` until the result of that query comes back.\n */\n setVariables = 2,\n\n /**\n * Indicates that `fetchMore` was called on this query and that the query created is currently in\n * flight.\n */\n fetchMore = 3,\n\n /**\n * Similar to the `setVariables` network status. It means that `refetch` was called on a query\n * and the refetch request is currently in flight.\n */\n refetch = 4,\n\n /**\n * Indicates that a polling query is currently in flight. So for example if you are polling a\n * query every 10 seconds then the network status will switch to `poll` every 10 seconds whenever\n * a poll request has been sent but not resolved.\n */\n poll = 6,\n\n /**\n * No request is in flight for this query, and no errors happened. 
Everything is OK.\n */\n ready = 7,\n\n /**\n * No request is in flight for this query, but one or more errors were detected.\n */\n error = 8,\n}\n\n/**\n * Returns true if there is currently a network request in flight according to a given network\n * status.\n */\nexport function isNetworkRequestInFlight(\n networkStatus?: NetworkStatus\n): boolean {\n return networkStatus ? networkStatus < 7 : false;\n}\n\n/**\n * Returns true if the network request is in ready or error state according to a given network\n * status.\n */\nexport function isNetworkRequestSettled(\n networkStatus?: NetworkStatus\n): boolean {\n return networkStatus === 7 || networkStatus === 8;\n}\n"]}
|
||||
8
graphql-subscription/node_modules/@apollo/client/core/package.json
generated
vendored
Normal file
8
graphql-subscription/node_modules/@apollo/client/core/package.json
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"name": "@apollo/client/core",
|
||||
"type": "module",
|
||||
"main": "core.cjs",
|
||||
"module": "index.js",
|
||||
"types": "index.d.ts",
|
||||
"sideEffects": false
|
||||
}
|
||||
86
graphql-subscription/node_modules/@apollo/client/core/types.d.ts
generated
vendored
Normal file
86
graphql-subscription/node_modules/@apollo/client/core/types.d.ts
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
import type { DocumentNode, GraphQLError } from "graphql";
import type { ApolloCache } from "../cache/index.js";
import type { FetchResult } from "../link/core/index.js";
import type { ApolloError } from "../errors/index.js";
import type { QueryInfo } from "./QueryInfo.js";
import type { NetworkStatus } from "./networkStatus.js";
import type { Resolver } from "./LocalState.js";
import type { ObservableQuery } from "./ObservableQuery.js";
import type { QueryOptions } from "./watchQueryOptions.js";
import type { Cache } from "../cache/index.js";
import type { IsStrictlyAny } from "../utilities/index.js";
export type { TypedDocumentNode } from "@graphql-typed-document-node/core";
/** The keys of `T` whose property types are functions. */
export type MethodKeys<T> = {
    [P in keyof T]: T[P] extends Function ? P : never;
}[keyof T];
/** An open-ended, string-keyed bag of arbitrary context values. */
export interface DefaultContext extends Record<string, any> {
}
/** Callback invoked with the `QueryInfo` of a query being listened to. */
export type QueryListener = (queryInfo: QueryInfo) => void;
/**
 * Callback receiving an observable query together with its current and
 * previous cache diffs; returns a boolean or a `TResult`.
 */
export type OnQueryUpdated<TResult> = (observableQuery: ObservableQuery<any>, diff: Cache.DiffResult<any>, lastDiff: Cache.DiffResult<any> | undefined) => boolean | TResult;
/** A query to refetch, given as a string or as a `DocumentNode`. */
export type RefetchQueryDescriptor = string | DocumentNode;
/** Internal variant that additionally accepts a full `QueryOptions` object. */
export type InternalRefetchQueryDescriptor = RefetchQueryDescriptor | QueryOptions;
type RefetchQueriesIncludeShorthand = "all" | "active";
/** Either an explicit list of query descriptors or the shorthand "all" / "active". */
export type RefetchQueriesInclude = RefetchQueryDescriptor[] | RefetchQueriesIncludeShorthand;
export type InternalRefetchQueriesInclude = InternalRefetchQueryDescriptor[] | RefetchQueriesIncludeShorthand;
/** Options accepted when refetching queries against a cache of type `TCache`. */
export interface RefetchQueriesOptions<TCache extends ApolloCache<any>, TResult> {
    updateCache?: (cache: TCache) => void;
    include?: RefetchQueriesInclude;
    optimistic?: boolean;
    onQueryUpdated?: OnQueryUpdated<TResult> | null;
}
/**
 * Result-array element type derived from `TResult`: `any[]` when `TResult`
 * is `any`, `ApolloQueryResult<any>[]` when boolean, the awaited type when a
 * `PromiseLike`, otherwise `TResult[]`.
 */
export type RefetchQueriesPromiseResults<TResult> = IsStrictlyAny<TResult> extends true ? any[] : TResult extends boolean ? ApolloQueryResult<any>[] : TResult extends PromiseLike<infer U> ? U[] : TResult[];
/** A promise of the refetch results, also exposing the affected queries and per-query results. */
export interface RefetchQueriesResult<TResult> extends Promise<RefetchQueriesPromiseResults<TResult>> {
    queries: ObservableQuery<any>[];
    results: InternalRefetchQueriesResult<TResult>[];
}
export interface InternalRefetchQueriesOptions<TCache extends ApolloCache<any>, TResult> extends Omit<RefetchQueriesOptions<TCache, TResult>, "include"> {
    include?: InternalRefetchQueriesInclude;
    removeOptimistic?: string;
}
export type InternalRefetchQueriesResult<TResult> = TResult extends boolean ? Promise<ApolloQueryResult<any>> : TResult;
export type InternalRefetchQueriesMap<TResult> = Map<ObservableQuery<any>, InternalRefetchQueriesResult<TResult>>;
export type { QueryOptions as PureQueryOptions };
/** A string-keyed map of GraphQL operation variables. */
export type OperationVariables = Record<string, any>;
/** The result shape delivered for a query, combining data with status flags. */
export interface ApolloQueryResult<T> {
    data: T;
    /**
     * A list of any errors that occurred during server-side execution of a GraphQL operation.
     * See https://www.apollographql.com/docs/react/data/error-handling/ for more information.
     */
    errors?: ReadonlyArray<GraphQLError>;
    /**
     * The single Error object that is passed to onError and useQuery hooks, and is often thrown during manual `client.query` calls.
     * This will contain both a NetworkError field and any GraphQLErrors.
     * See https://www.apollographql.com/docs/react/data/error-handling/ for more information.
     */
    error?: ApolloError;
    loading: boolean;
    networkStatus: NetworkStatus;
    partial?: boolean;
}
/**
 * Reducer producing a new query-result record from the previous result plus
 * a mutation result and the query's name and variables.
 */
export type MutationQueryReducer<T> = (previousResult: Record<string, any>, options: {
    mutationResult: FetchResult<T>;
    queryName: string | undefined;
    queryVariables: Record<string, any>;
}) => Record<string, any>;
/** Map from query name to the reducer applied to that query's result. */
export type MutationQueryReducersMap<T = {
    [key: string]: any;
}> = {
    [queryName: string]: MutationQueryReducer<T>;
};
/**
 * @deprecated Use `MutationUpdaterFunction` instead.
 */
export type MutationUpdaterFn<T = {
    [key: string]: any;
}> = (cache: ApolloCache<T>, mutationResult: FetchResult<T>) => void;
/**
 * Cache-update function invoked with the mutation's cache, its fetch result
 * (minus `context`), and the mutation's context/variables.
 */
export type MutationUpdaterFunction<TData, TVariables, TContext, TCache extends ApolloCache<any>> = (cache: TCache, result: Omit<FetchResult<TData>, "context">, options: {
    context?: TContext;
    variables?: TVariables;
}) => void;
/** Map from type name to a map of field names to their local `Resolver`s. */
export interface Resolvers {
    [key: string]: {
        [field: string]: Resolver;
    };
}
|
||||
//# sourceMappingURL=types.d.ts.map
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user