//===- Store.cpp - Interface for maps from Locations to Values ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the types Store and StoreManager.
//
//===----------------------------------------------------------------------===//

#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>

using namespace clang;
using namespace ento;

StoreManager::StoreManager(ProgramStateManager &stateMgr)
    : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
      MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}

StoreRef StoreManager::enterStackFrame(Store OldStore,
                                       const CallEvent &Call,
                                       const StackFrameContext *LCtx) {
  StoreRef Store = StoreRef(OldStore, *this);

  SmallVector<CallEvent::FrameBindingTy, 16> InitialBindings;
  Call.getInitialStackFrameContents(LCtx, InitialBindings);

  for (const auto &I : InitialBindings)
    Store = Bind(Store.getStore(), I.first, I.second);

  return Store;
}
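
// Helper for wrapping a base region in an ElementRegion of type 'EleTy' at
// the given constant index.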
const ElementRegion *StoreManager::MakeElementRegion(const SubRegion *Base,
                                                     QualType EleTy,
                                                     uint64_t index) {
  NonLoc idx = svalBuilder.makeArrayIndex(index);
  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
}

const ElementRegion *StoreManager::GetElementZeroRegion(const SubRegion *R,
                                                        QualType T) {
  NonLoc idx = svalBuilder.makeZeroArrayIndex();
  assert(!T.isNull());
  return MRMgr.getElementRegion(T, idx, R, Ctx);
}
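
// Produces the region that results from viewing 'R' through a cast to
// 'CastToTy'. A null result is interpreted by callers as UnknownVal.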
const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
  ASTContext &Ctx = StateMgr.getContext();

  // Handle casts to Objective-C objects.
  if (CastToTy->isObjCObjectPointerType())
    return R->StripCasts();

  if (CastToTy->isBlockPointerType()) {
    // FIXME: We may need different solutions, depending on the symbol
    // involved. Blocks can be cast to/from 'id', as they can be treated
    // as Objective-C objects. This could possibly be handled by enhancing
    // our reasoning of downcasts of symbolic objects.
    if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
      return R;

    // We don't know what to make of it. Return a NULL region, which
    // will be interpreted as UnknownVal.
    return nullptr;
  }

  // Now assume we are casting from pointer to pointer. Other cases should
  // already be handled.
  QualType PointeeTy = CastToTy->getPointeeType();
  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);

  // Handle casts to void*. We just pass the region through.
  if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
    return R;

  // Handle casts from compatible types.
  if (R->isBoundable())
    if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
      if (CanonPointeeTy == ObjTy)
        return R;
    }

  // Process region cast according to the kind of the region being cast.
  switch (R->getKind()) {
  case MemRegion::CXXThisRegionKind:
  case MemRegion::CodeSpaceRegionKind:
  case MemRegion::StackLocalsSpaceRegionKind:
  case MemRegion::StackArgumentsSpaceRegionKind:
  case MemRegion::HeapSpaceRegionKind:
  case MemRegion::UnknownSpaceRegionKind:
  case MemRegion::StaticGlobalSpaceRegionKind:
  case MemRegion::GlobalInternalSpaceRegionKind:
  case MemRegion::GlobalSystemSpaceRegionKind:
  case MemRegion::GlobalImmutableSpaceRegionKind: {
    llvm_unreachable("Invalid region cast");
  }

  case MemRegion::FunctionCodeRegionKind:
  case MemRegion::BlockCodeRegionKind:
  case MemRegion::BlockDataRegionKind:
  case MemRegion::StringRegionKind:
    // FIXME: Need to handle arbitrary downcasts.
  case MemRegion::SymbolicRegionKind:
  case MemRegion::AllocaRegionKind:
  case MemRegion::CompoundLiteralRegionKind:
  case MemRegion::FieldRegionKind:
  case MemRegion::ObjCIvarRegionKind:
  case MemRegion::ObjCStringRegionKind:
  case MemRegion::VarRegionKind:
  case MemRegion::CXXTempObjectRegionKind:
  case MemRegion::CXXBaseObjectRegionKind:
  case MemRegion::CXXDerivedObjectRegionKind:
    return MakeElementRegion(cast<SubRegion>(R), PointeeTy);

  case MemRegion::ElementRegionKind: {
    // If we are casting from an ElementRegion to another type, the
    // algorithm is as follows:
    //
    // (1) Compute the "raw offset" of the ElementRegion from the
    //     base region. This is done by calling 'getAsRawOffset()'.
    //
    // (2a) If we get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', determine if the absolute offset
    //      can be exactly divided into chunks of the size of the
    //      casted-pointee type. If so, create a new ElementRegion with
    //      the pointee-cast type as the new ElementType and the index
    //      being the offset divided by the chunk size. If not, create
    //      a new ElementRegion at offset 0 off the raw offset region.
    //
    // (2b) If we don't get a 'RegionRawOffset' after calling
    //      'getAsRawOffset()', it means that we are at offset 0.
    //
    // FIXME: Handle symbolic raw offsets.
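    //
    // For example, casting '&buf[8]' of a char buffer to 'int *' (with
    // sizeof(int) == 4) gives a raw offset of 8 bytes, which divides evenly,
    // so the result is element{int, 2, buf}. Casting '&buf[6]' gives a raw
    // offset of 6 bytes, which does not divide evenly, so the result is
    // element{int, 0, element{char, 6, buf}}.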
    const ElementRegion *elementR = cast<ElementRegion>(R);
    const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
    const MemRegion *baseR = rawOff.getRegion();

    // If we cannot compute a raw offset, throw up our hands and return
    // a NULL MemRegion*.
    if (!baseR)
      return nullptr;

    CharUnits off = rawOff.getOffset();

    if (off.isZero()) {
      // Edge case: we are at 0 bytes off the beginning of baseR. We
      // check to see if the type we are casting to is the same as the base
      // region. If so, just return the base region.
      if (const auto *TR = dyn_cast<TypedValueRegion>(baseR)) {
        QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
        QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
        if (CanonPointeeTy == ObjTy)
          return baseR;
      }

      // Otherwise, create a new ElementRegion at offset 0.
      return MakeElementRegion(cast<SubRegion>(baseR), PointeeTy);
    }

    // We have a non-zero offset from the base region. We want to determine
    // if the offset can be evenly divided by sizeof(PointeeTy). If so,
    // we create an ElementRegion whose index is that value. Otherwise, we
    // create two ElementRegions, one that reflects a raw offset and the other
    // that reflects the cast.

    // Compute the index for the new ElementRegion.
    int64_t newIndex = 0;
    const MemRegion *newSuperR = nullptr;

    // We can only compute sizeof(PointeeTy) if it is a complete type.
    if (!PointeeTy->isIncompleteType()) {
      // Compute the size in **bytes**.
      CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
      if (!pointeeTySize.isZero()) {
        // Is the offset a multiple of the size? If so, we can layer the
        // ElementRegion (with elementType == PointeeTy) directly on top of
        // the base region.
        if (off % pointeeTySize == 0) {
          newIndex = off / pointeeTySize;
          newSuperR = baseR;
        }
      }
    }

    if (!newSuperR) {
      // Create an intermediate ElementRegion to represent the raw byte.
      // This will be the super region of the final ElementRegion.
      newSuperR = MakeElementRegion(cast<SubRegion>(baseR), Ctx.CharTy,
                                    off.getQuantity());
    }

    return MakeElementRegion(cast<SubRegion>(newSuperR), PointeeTy, newIndex);
  }
  }

  llvm_unreachable("unreachable");
}
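
/// Returns false only when \p V is backed by a typed region whose C++ record
/// type is known and differs from the record type named by \p Ty; used to
/// guard evalDerivedToBase() against reinterpret_cast.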
static bool regionMatchesCXXRecordType(SVal V, QualType Ty) {
  const MemRegion *MR = V.getAsRegion();
  if (!MR)
    return true;

  const auto *TVR = dyn_cast<TypedValueRegion>(MR);
  if (!TVR)
    return true;

  const CXXRecordDecl *RD = TVR->getValueType()->getAsCXXRecordDecl();
  if (!RD)
    return true;

  const CXXRecordDecl *Expected = Ty->getPointeeCXXRecordDecl();
  if (!Expected)
    Expected = Ty->getAsCXXRecordDecl();

  return Expected->getCanonicalDecl() == RD->getCanonicalDecl();
}

SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
  // Sanity check to avoid doing the wrong thing in the face of
  // reinterpret_cast.
  if (!regionMatchesCXXRecordType(Derived, Cast->getSubExpr()->getType()))
    return UnknownVal();

  // Walk through the cast path to create nested CXXBaseRegions.
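  // For example, given 'struct A {}; struct B : A {}; struct D : B {};', a
  // DerivedToBase cast from 'D *' to 'A *' carries the path (B, A), so the
  // result is base{A, base{B, <region of the D object>}}.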
  SVal Result = Derived;
  for (CastExpr::path_const_iterator I = Cast->path_begin(),
                                     E = Cast->path_end();
       I != E; ++I) {
    Result = evalDerivedToBase(Result, (*I)->getType(), (*I)->isVirtual());
  }
  return Result;
}

SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
  // Walk through the path to create nested CXXBaseRegions.
  SVal Result = Derived;
  for (const auto &I : Path)
    Result = evalDerivedToBase(Result, I.Base->getType(),
                               I.Base->isVirtual());
  return Result;
}

SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
                                     bool IsVirtual) {
  const MemRegion *DerivedReg = Derived.getAsRegion();
  if (!DerivedReg)
    return Derived;

  const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl();
  if (!BaseDecl)
    BaseDecl = BaseType->getAsCXXRecordDecl();
  assert(BaseDecl && "not a C++ object?");

  if (const auto *AlreadyDerivedReg =
          dyn_cast<CXXDerivedObjectRegion>(DerivedReg)) {
    if (const auto *SR =
            dyn_cast<SymbolicRegion>(AlreadyDerivedReg->getSuperRegion()))
      if (SR->getSymbol()->getType()->getPointeeCXXRecordDecl() == BaseDecl)
        return loc::MemRegionVal(SR);

    DerivedReg = AlreadyDerivedReg->getSuperRegion();
  }

  const MemRegion *BaseReg = MRMgr.getCXXBaseObjectRegion(
      BaseDecl, cast<SubRegion>(DerivedReg), IsVirtual);

  return loc::MemRegionVal(BaseReg);
}

/// Returns the static type of the given region, if it represents a C++ class
/// object.
///
/// This handles both fully-typed regions, where the dynamic type is known, and
/// symbolic regions, where the dynamic type is merely bounded (and even then,
/// only ostensibly!), but does not take advantage of any dynamic type info.
static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
  if (const auto *TVR = dyn_cast<TypedValueRegion>(MR))
    return TVR->getValueType()->getAsCXXRecordDecl();
  if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
    return SR->getSymbol()->getType()->getPointeeCXXRecordDecl();
  return nullptr;
}
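
/// Attempts to model a cast of \p Base down to \p TargetType (a pointer or
/// reference to a derived class, or to void) by peeling off the
/// CXXBaseObjectRegion layers that earlier upcasts added. \p Failed is set
/// only when the region's precise type information shows the cast cannot
/// succeed.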
SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
                                   bool &Failed) {
  Failed = false;

  const MemRegion *MR = Base.getAsRegion();
  if (!MR)
    return UnknownVal();

  // Assume the derived class is a pointer or a reference to a CXX record.
  TargetType = TargetType->getPointeeType();
  assert(!TargetType.isNull());
  const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
  if (!TargetClass && !TargetType->isVoidType())
    return UnknownVal();

  // Drill down the CXXBaseObject chains, which represent upcasts (casts from
  // derived to base).
  while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) {
    // If we found the derived class, the cast succeeds.
    if (MRClass == TargetClass)
      return loc::MemRegionVal(MR);

    // We skip over incomplete types. They must be the result of an earlier
    // reinterpret_cast, as one can only dynamic_cast between types in the same
    // class hierarchy.
    if (!TargetType->isVoidType() && MRClass->hasDefinition()) {
      // Static upcasts are marked as DerivedToBase casts by Sema, so this will
      // only happen when multiple or virtual inheritance is involved.
      CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
                         /*DetectVirtual=*/false);
      if (MRClass->isDerivedFrom(TargetClass, Paths))
        return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
    }

    if (const auto *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
      // Drill down the chain to get the derived classes.
      MR = BaseR->getSuperRegion();
      continue;
    }

    // If this is a cast to void*, return the region.
    if (TargetType->isVoidType())
      return loc::MemRegionVal(MR);

    // Strange use of reinterpret_cast can give us paths we don't reason
    // about well, by putting in ElementRegions where we'd expect
    // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
    // derived class has a zero offset from the base class), then it's safe
    // to strip the cast; if it's invalid, -Wreinterpret-base-class should
    // catch it. In the interest of performance, the analyzer will silently
    // do the wrong thing in the invalid case (because offsets for subregions
    // will be wrong).
    const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false);
    if (Uncasted == MR) {
      // We reached the bottom of the hierarchy and did not find the derived
      // class. We must be casting the base to derived, so the cast should
      // fail.
      break;
    }

    MR = Uncasted;
  }

  if (const auto *SR = dyn_cast<SymbolicRegion>(MR)) {
    return loc::MemRegionVal(MRMgr.getCXXDerivedObjectRegion(TargetClass, SR));
  }

  // We failed if the region we ended up with has perfect type info.
  Failed = isa<TypedValueRegion>(MR);
  return UnknownVal();
}

/// CastRetrievedVal - Used by subclasses of StoreManager to implement
/// implicit casts that arise from loads from regions that are reinterpreted
/// as another region.
SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
                                    QualType castTy) {
  if (castTy.isNull() || V.isUnknownOrUndef())
    return V;

  // When retrieving a symbolic pointer and expecting a non-void pointer,
  // wrap it into an element region of the expected type if necessary.
  // SValBuilder::dispatchCast() doesn't do that, but it is necessary to
  // make sure that the retrieved value makes sense, because there's no other
  // cast in the AST that would tell us to cast it to the correct pointer type.
  // We might need to do that for non-void pointers as well.
  // FIXME: We really need a single good function to perform casts for us
  // correctly every time we need it.
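  // For example, if an 'int *' value is expected but the bound value is a
  // symbol of type 'void *', the symbolic region is wrapped as
  // element{int, 0, SymRegion} so the result has the expected pointee type.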
  if (castTy->isPointerType() && !castTy->isVoidPointerType())
    if (const auto *SR = dyn_cast_or_null<SymbolicRegion>(V.getAsRegion()))
      if (SR->getSymbol()->getType().getCanonicalType() !=
          castTy.getCanonicalType())
        return loc::MemRegionVal(castRegion(SR, castTy));

  return svalBuilder.dispatchCast(V, castTy);
}

SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
  if (Base.isUnknownOrUndef())
    return Base;

  Loc BaseL = Base.castAs<Loc>();
  const SubRegion* BaseR = nullptr;

  switch (BaseL.getSubKind()) {
  case loc::MemRegionValKind:
    BaseR = cast<SubRegion>(BaseL.castAs<loc::MemRegionVal>().getRegion());
    break;

  case loc::GotoLabelKind:
    // These are abnormal cases. Flag an undefined value.
    return UndefinedVal();

  case loc::ConcreteIntKind:
    // While these seem funny, this can happen through casts.
    // FIXME: What we should return is the field offset, not base. For example,
    //  add the field offset to the integer value. That way things
    //  like this work properly: &(((struct foo *) 0xa)->f)
    //  However, that's not easy to fix without reducing our abilities
    //  to catch null pointer dereference. E.g., ((struct foo *)0x0)->f = 7
    //  is a null dereference even though we're dereferencing offset of f
    //  rather than null. Coming up with an approach that computes offsets
    //  over null pointers properly while still being able to catch null
    //  dereferences might be worth it.
    return Base;

  default:
    llvm_unreachable("Unhandled Base.");
  }

  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
  // of FieldDecl.
  if (const auto *ID = dyn_cast<ObjCIvarDecl>(D))
    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));

  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
}

SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
  return getLValueFieldOrIvar(decl, base);
}

SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
                                    SVal Base) {
  // If the base is an unknown or undefined value, just return it back.
  // FIXME: For absolute pointer addresses, we just return that value back as
  //  well, although in reality we should return the offset added to that
  //  value. See also the similar FIXME in getLValueFieldOrIvar().
  if (Base.isUnknownOrUndef() || Base.getAs<loc::ConcreteInt>())
    return Base;

  if (Base.getAs<loc::GotoLabel>())
    return UnknownVal();

  const SubRegion *BaseRegion =
      Base.castAs<loc::MemRegionVal>().getRegionAs<SubRegion>();

  // A pointer of any type can be cast and used as an array base.
  const auto *ElemR = dyn_cast<ElementRegion>(BaseRegion);

  // Convert the offset to the appropriate size and signedness.
  Offset = svalBuilder.convertToArrayIndex(Offset).castAs<NonLoc>();

  if (!ElemR) {
    // If the base region is not an ElementRegion, create one.
    // This can happen in the following example:
    //
    //   char *p = __builtin_alloca(10);
    //   p[1] = 8;
    //
    // Observe that 'p' binds to an AllocaRegion.
    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
                                                    BaseRegion, Ctx));
  }

  SVal BaseIdx = ElemR->getIndex();

  if (!BaseIdx.getAs<nonloc::ConcreteInt>())
    return UnknownVal();

  const llvm::APSInt &BaseIdxI =
      BaseIdx.castAs<nonloc::ConcreteInt>().getValue();

  // Only allow non-integer offsets if the base region has no offset itself.
  // FIXME: This is a somewhat arbitrary restriction. We should be using
  // SValBuilder here to add the two offsets without checking their types.
  if (!Offset.getAs<nonloc::ConcreteInt>()) {
    if (isa<ElementRegion>(BaseRegion->StripCasts()))
      return UnknownVal();

    return loc::MemRegionVal(MRMgr.getElementRegion(
        elementType, Offset, cast<SubRegion>(ElemR->getSuperRegion()), Ctx));
  }

  const llvm::APSInt& OffI = Offset.castAs<nonloc::ConcreteInt>().getValue();
  assert(BaseIdxI.isSigned());

  // Compute the new index.
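  // For example, if the base is element{T, 2, arr} and the offset is 3, the
  // result below is element{elementType, 5, arr}.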
  nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
                                                                         OffI));

  // Construct the new ElementRegion.
  const SubRegion *ArrayR = cast<SubRegion>(ElemR->getSuperRegion());
  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
                                                  Ctx));
}

StoreManager::BindingsHandler::~BindingsHandler() = default;
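
// Remembers the first region found to be bound to 'Sym'; returning false
// stops the scan as soon as a second binding shows the region is not unique.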
bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager& SMgr,
                                                    Store store,
                                                    const MemRegion* R,
                                                    SVal val) {
  SymbolRef SymV = val.getAsLocSymbol();
  if (!SymV || SymV != Sym)
    return true;

  if (Binding) {
    First = false;
    return false;
  }
  else
    Binding = R;

  return true;
}